psf_temp_over.shape
        ysh = int(yind - ypix/2)
        xsh = int(xind - xpix/2)

        fov_pix_over = trim_psf * osamp
        coeff = []
        for im in psf_coeff:
            im = fshift(im, -xsh, -ysh, interp='cubic')
            im = pad_or_cut_to_size(im, (fov_pix_over, fov_pix_over))
            coeff.append(im)
        psf_coeff = np.array(coeff)
        psf_coeff_hdr['FOVPIX'] = trim_psf

        satlim = saturation_limits(self, psf_coeff=psf_coeff, psf_coeff_hdr=psf_coeff_hdr,
                                   sp=sp, units=units, bp_lim=bp_lim, int_time=t_sat,
                                   full_well=well_level, well_frac=well_frac,
                                   verbose=verbose, **kwargs)

        return satlim

    def saturation_levels(self, sp, full_size=True, ngroup=2, image=None, **kwargs):
        """Saturation levels

        Create an image showing the level of saturation for each pixel after
        `ngroup` group times have elapsed.

        Parameters
        ----------
        sp : :mod:`pysynphot.spectrum`
            A pysynphot spectral object (normalized).
        full_size : bool
            Expand (or contract) to size of detector array?
            If False, use fov_pix size.
        ngroup : int
            How many group times to determine saturation level?
            If this number is higher than the total groups in the ramp,
            then a warning is produced. The default is ngroup=2.
            A value of 0 corresponds to the so-called "zero-frame,"
            which is the very first frame that is read out and saved
            separately. This is the equivalent to ngroup=1 for RAPID
            and BRIGHT1 observations.
        image : ndarray
            Rather than generating an image on the fly, pass a
            pre-computed slope image. Overrides `sp` and `full_size`.
        """

        assert ngroup >= 0

        is_grism = self.is_grism

        t_frame = self.multiaccum_times['t_frame']
        t_int = self.multiaccum_times['t_int']
        if ngroup == 0:
            t_sat = t_frame
        else:
            ma = self.multiaccum
            nf = ma.nf; nd1 = ma.nd1; nd2 = ma.nd2
            t_sat = (nd1 + ngroup*nf + (ngroup-1)*nd2) * t_frame

        if t_sat > t_int:
            _log.warning('ngroup*t_group is greater than t_int.')

        # Slope image of input
        if image is not None:
            return image * t_sat / self.well_level
        else:
            image = self.calc_psf_from_coeff(sp=sp, return_oversample=False, return_hdul=False)
            if is_grism:
                wave, image = image

            if full_size:
                shape = (self.det_info['ypix'], self.det_info['xpix'])
                image = pad_or_cut_to_size(image, shape)

            # Add in zodi background to full image
            image += self.bg_zodi(**kwargs)

            # Well levels after "saturation time"
            sat_level = image * t_sat / self.well_level

            if is_grism:
                return (wave, sat_level)
            else:
                return sat_level

    def sensitivity(self, nsig=10, units=None, sp=None, verbose=False, **kwargs):
        """Sensitivity limits.

        Convenience function for returning the point source (and surface
        brightness) sensitivity for the given instrument setup. See the
        `sensitivities` function for more details.

        Parameters
        ----------
        nsig : int, float
            Desired nsigma sensitivity (default 10).
        units : str
            Output units (defaults to uJy for grisms, nJy for imaging).
        sp : :mod:`pysynphot.spectrum`
            Input spectrum to use for determining sensitivity.
            Only the spectral shape matters, unless ``forwardSNR=True``.
        verbose : bool
            Print result details.

        Keyword Args
        ------------
        forwardSNR : bool
            Find the SNR of the input spectrum instead of sensitivity.
        zfact : float
            Factor to scale Zodiacal spectrum (default 2.5).
        ideal_Poisson : bool
            If set to True, use total signal for noise estimate,
            otherwise the MULTIACCUM equation is used.
        rad_EE : float
            Extraction aperture radius (in pixels) for imaging mode.
        dw_bin : float
            Delta wavelength for spectral sensitivities (grisms & DHS).
        ap_spec : int, float
            Instead of dw_bin, specify the spectral extraction aperture
            in pixels. Takes priority over dw_bin.
            Value will get rounded up to the nearest int.
        """

        tf = self.multiaccum_times['t_frame']

        det = self.Detector
        ktc = det.ktc
        rn = det.read_noise
        idark = det.dark_current
        p_excess = det.p_excess

        pupil_mask = '' if self.pupil_mask is None else self.pupil_mask

        kw1 = self.multiaccum.to_dict()
        kw2 = {'rn': rn, 'ktc': ktc, 'idark': idark, 'p_excess': p_excess}
        kwargs = merge_dicts(kwargs, kw1, kw2)
        if 'ideal_Poisson' not in kwargs.keys():
            kwargs['ideal_Poisson'] = True

        # Always use the bg coeff
        psf_coeff = self._nrc_bg.psf_coeff
        psf_coeff_hdr = self._nrc_bg.psf_coeff_header.copy()
        fov_pix, osamp = (psf_coeff_hdr['FOVPIX'], psf_coeff_hdr['OSAMP'])
        # We don't necessarily need the entire image, so cut down to size for speed
        if (not ('WEAK LENS' in pupil_mask)) and (fov_pix > 33):
            fov_pix = 33
            fov_pix_over = fov_pix * osamp
            psf_coeff = np.array([pad_or_cut_to_size(im, (fov_pix_over, fov_pix_over))
                                  for im in psf_coeff])
            kwargs['fov_pix'] = fov_pix
            psf_coeff_hdr['FOVPIX'] = fov_pix

        bglim = sensitivities(self, psf_coeff=psf_coeff, psf_coeff_hdr=psf_coeff_hdr,
                              sp=sp, units=units, nsig=nsig, tf=tf,
                              verbose=verbose, **kwargs)

        return bglim

    def bg_zodi(self, zfact=None, **kwargs):
        """Zodiacal background flux.

        There are options to call `jwst_backgrounds` to obtain better
        predictions of the background. Specify keywords `ra`, `dec`,
        and `thisday` to use `jwst_backgrounds`.

        Returned values are in units of e-/sec/pixel.

        Parameters
        ----------
        zfact : float
            Factor to scale Zodiacal spectrum (default 2.5).

        Keyword Args
        ------------
        ra : float
            Right ascension in decimal degrees.
        dec : float
            Declination in decimal degrees.
        thisday : int
            Calendar day to use for background calculation.
            If not given, will use the average of visible calendar days.

        Notes
        -----
        Representative values for zfact:

        * 0.0 - No zodiacal emission
        * 1.0 - Minimum zodiacal emission from JWST-CALC-003894
        * 1.2 - Required NIRCam performance
        * 2.5 - Average (default)
        * 5.0 - High
        * 10.0 - Maximum
        """

        # Dark image
        if self.is_dark:
            return 0

        bp = self.bandpass
        waveset = bp.wave
        sp_zodi = zodi_spec(zfact, **kwargs)
        obs_zodi = S.Observation(sp_zodi, bp, waveset)
        fzodi_pix = obs_zodi.countrate() * (self.pixelscale / 206265.0)**2

        # Recommend a zfact value if ra, dec, and thisday are specified
        if 'ra' in kwargs.keys():
            sp_zodi_temp = zodi_spec(zfact=1)
            obs_zodi_temp = S.Observation(sp_zodi_temp, bp, waveset)
            fzodi_pix_temp = obs_zodi_temp.countrate() * (self.pixelscale / 206265.0)**2
            zf_rec = fzodi_pix / fzodi_pix_temp
            str1 = 'Using ra, dec, thisday keywords can be relatively slow. \n'
            str2 = '\tFor your specified loc and date, we recommend using zfact={:.1f}'.format(zf_rec)
            _log.warning(str1 + str2)

        # Don't forget about Lyot mask attenuation (not in bandpass throughput)
        if self.is_lyot:
            fzodi_pix *= 0.19

        return fzodi_pix

    def bg_zodi_image(self, zfact=None, frame='sci', **kwargs):
        """Zodiacal light image

        Returns an image of background Zodiacal light emission
        in e-/sec in the specified coordinate frame.

        Parameters
        ----------
        zfact : float
            Factor to scale Zodiacal spectrum (default 2.5).
        frame : str
            Return in 'sci' or 'det' coordinates?

        Keyword Args
        ------------
        ra : float
            Right ascension in decimal degrees.
        dec : float
            Declination in decimal degrees.
        thisday : int
            Calendar day to use for background calculation.
            If not given, will use the average of visible calendar days.
        Notes
        -----
        Representative values for zfact:

        * 0.0 - No zodiacal emission
        * 1.0 - Minimum zodiacal emission from JWST-CALC-003894
        * 1.2 - Required NIRCam performance
        * 2.5 - Average (default)
        * 5.0 - High
        * 10.0 - Maximum
        """

        detid = self.Detector.detid
        x0, y0 = (self.det_info['x0'], self.det_info['y0'])
        xpix, ypix = (self.det_info['xpix'], self.det_info['ypix'])

        # Dark image
        if self.is_dark:
            return np.zeros([ypix, xpix])

        bp = self.bandpass
        waveset = bp.wave
        sp_zodi = zodi_spec(zfact, **kwargs)
        obs_zodi = S.Observation(sp_zodi, bp, waveset)
        fzodi_pix = obs_zodi.countrate() * (self.pixelscale / 206265.0)**2

        # Get the equivalent zfact value
        if 'ra' in kwargs.keys():
            sp_zodi_temp = zodi_spec(zfact=1)
            obs_zodi_temp = S.Observation(sp_zodi_temp, bp, waveset)
            fzodi_pix_temp = obs_zodi_temp.countrate() * (self.pixelscale / 206265.0)**2
            zfact = fzodi_pix / fzodi_pix_temp
            # Pop with defaults in case dec/thisday were not supplied
            _ = kwargs.pop('ra', None)
            _ = kwargs.pop('dec', None)
            _ = kwargs.pop('thisday', None)

        filter = self.filter
        pupil_mask = self.pupil_mask

        if self.is_grism:
            # sci coords
            im_bg = grism_background_image(filter, pupil=pupil_mask,
                                           module=self.module, sp_bg=sp_zodi, **kwargs)
            # Convert to det coords and crop
            im_bg = sci_to_det(im_bg, detid)
            im_bg = im_bg[y0:y0+ypix, x0:x0+xpix]
            # Back to sci coords
            im_bg = det_to_sci(im_bg, detid)
        elif self.is_coron or self.coron_substrate:
            # Create full image, then crop based on detector configuration
            im_bg = build_mask_detid(detid, oversample=1, pupil=pupil_mask,
                                     filter=self.filter)
            if im_bg is None:
                # In the event the specified detid has no coronagraphic mask
                # This includes ['A1', 'A3', 'B2', 'B4']
                im_bg = np.ones([ypix, xpix])
            else:
                # Convert to det coords and crop
                im_bg = sci_to_det(im_bg, detid)
                im_bg = im_bg[y0:y0+ypix, x0:x0+xpix]
                # Back to sci coords and multiply by e-/sec/pix
                im_bg = det_to_sci(im_bg, detid)

            # Multiply by e-/sec/pix
            im_bg *= self.bg_zodi(zfact, **kwargs)
        else:
            # No spatial structures for direct imaging and certain Lyot masks.
            im_bg = np.ones([ypix, xpix]) * self.bg_zodi(zfact, **kwargs)

        # Clear reference pixels
        # im_bg = sci_to_det(im_bg, detid)
        # mask_ref = self.Detector.mask_ref
        # im_bg[mask_ref] = 0
        # im_bg = det_to_sci(im_bg, detid)

        if frame == 'det':
            return sci_to_det(im_bg, detid)
        elif frame == 'sci':
            return im_bg
        else:
            raise ValueError(f"frame {frame} not recognized. Use either 'sci' or 'det'.")

    def ramp_optimize(self, sp, sp_bright=None, is_extended=False, patterns=None,
                      snr_goal=None, snr_frac=0.02, tacq_max=None, tacq_frac=0.1,
                      well_frac_max=0.8, nint_min=1, nint_max=5000, ng_min=2, ng_max=None,
                      return_full_table=False, even_nints=False, verbose=False, **kwargs):
        """Optimize ramp settings.

        Find
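# ---------------------------------------------------------------------------
# Usage sketch (not part of the excerpt above, which cuts off mid-docstring).
# Assumes `nrc` is an instrument object exposing the methods defined in this
# class, and `sp` is a normalized pysynphot spectrum; both names are
# hypothetical here.
import numpy as np

sat = nrc.saturation_levels(sp, full_size=False, ngroup=2)  # fractional well fill
print('Max well fill: {:.1%}'.format(np.max(sat)))          # >100% means saturated

bg = nrc.bg_zodi(zfact=5.0)       # zodiacal background, e-/sec/pixel, "high" case
bglim = nrc.sensitivity(nsig=10)  # 10-sigma point-source sensitivity limits
# ---------------------------------------------------------------------------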
#!/usr/bin/python
# -*- coding: utf-8 -*-
import wx.lib.layoutf as layoutf
import traceback, wx, os

# TODO Check to see if this is still needed. Don't think that it is,
# now that each func also calls app = wx.App()


def set_icon(dlg, icon):
    if not icon:
        return
    if type(icon) is str:
        ico = wx.Icon(icon)
        dlg.SetIcon(ico)
        return
    if type(icon) == type(wx.Icon()):
        dlg.SetIcon(icon)
        return
    if hasattr(icon, "GetIcon"):
        dlg.SetIcon(icon.GetIcon())
        return
    return


#-----------------------------------------------------------------------------#
#   SelectOne                                                                  #
#-----------------------------------------------------------------------------#
def SelectOne(title, msg, lst, size=(-1, -1), icon=None):
    '''
    Show a list of strings.
    Arguments: title, message and a list of strings to be shown for selection.
    Return will be the selected string.
    '''
    app = wx.App()
    dlg = wx.SingleChoiceDialog(None, msg, title, lst, wx.CHOICEDLG_STYLE)
    dlg.Size = size
    if icon:
        dlg.SetIcon(icon.GetIcon())
    if dlg.ShowModal() == wx.ID_OK:
        sel = dlg.GetStringSelection()
    else:
        sel = None
    dlg.Destroy()
    del app
    return sel


#-----------------------------------------------------------------------------#
#   SelectMult                                                                 #
#-----------------------------------------------------------------------------#
def SelectMult(title, msg, lst, preselect=None, size=(-1, -1), icon=None):
    '''
    Show a list of strings with a check box each.
    Args: title, message, list and an optional list of integers indicating
    which items should appear preselected.
    Return is a list of integers of the selected item indexes.
    '''
    app = wx.App()
    dlg = wx.MultiChoiceDialog(None, msg, title, lst)
    if icon:
        dlg.SetIcon(icon.GetIcon())
    if type(preselect) == type([]):
        dlg.SetSelections(preselect)
    dlg.Size = size
    if (dlg.ShowModal() == wx.ID_OK):
        selections = dlg.GetSelections()
    else:
        selections = None
    dlg.Destroy()
    del app
    return selections


#-----------------------------------------------------------------------------#
#   DirDlg                                                                     #
#-----------------------------------------------------------------------------#
def DirDlg(title="Choose a directory:", startdir=os.getcwd(), size=(-1, -1), icon=None):
    app = wx.App()
    dlg = wx.DirDialog(None, title, pos=(-1, -1),
                       style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST | wx.DD_CHANGE_DIR)
    if icon:
        dlg.SetIcon(icon.GetIcon())
    dlg.SetPath(startdir)
    dlg.Size = size
    if dlg.ShowModal() == wx.ID_OK:
        path = dlg.GetPath()
    else:
        path = None
    dlg.Destroy()
    del app
    return path


#-----------------------------------------------------------------------------#
#   OpenDlg                                                                    #
#-----------------------------------------------------------------------------#
def OpenDlg(title="Choose files", mult=True, icon=None, startdir=os.getcwd(),
            wildcard=None, size=(-1, -1)):
    '''
    Returns a list of selected files.
    '''
    app = wx.App()
    if wildcard is None:
        wild = "Python Files (*.py*)|*.py*|" \
               "All Files (*.*)|*.*"
    else:
        wild = wildcard
    if mult:
        dlg = wx.FileDialog(None, message=title, defaultDir=startdir,
                            defaultFile="", wildcard=wild,
                            style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR)
    else:
        dlg = wx.FileDialog(None, message=title, defaultDir=startdir,
                            defaultFile="", wildcard=wild,
                            style=wx.FD_OPEN | wx.FD_CHANGE_DIR)
    dlg.Size = size
    if icon:
        dlg.SetIcon(icon.GetIcon())
    # Show the dialog and retrieve the user response.
    # If OK, process data.
    if dlg.ShowModal() == wx.ID_OK:
        # This returns a Python list of files that were selected.
        paths = dlg.GetPaths()
    else:
        paths = None
    dlg.Destroy()
    del app
    return paths


#-----------------------------------------------------------------------------#
#   ExcBox                                                                     #
#-----------------------------------------------------------------------------#
def ExcBox(title="Exception"):
    '''
    Show a message box with the traceback content of the last exception.
    '''
    app = wx.App()
    trc = traceback.format_exc()
    wx.MessageBox(trc, title)
    del app
    return


#-----------------------------------------------------------------------------#
#   YesNoBox                                                                   #
#-----------------------------------------------------------------------------#
def YesNoBox(title, msg="", icon=None):
    '''
    Show a YES/NO box and return True or False.
    '''
    app = wx.App()
    dlg = wx.MessageDialog(None, msg, title, wx.YES_NO | wx.ICON_QUESTION)
    if icon:
        dlg.SetIcon(icon.GetIcon())
    result = dlg.ShowModal()
    dlg.Destroy()
    del app
    if result == wx.ID_YES:
        return True
    return False


#-----------------------------------------------------------------------------#
#   InputBox                                                                   #
#-----------------------------------------------------------------------------#
def InputBox(title, msg, default="", icon=None):
    '''
    Returns: user-entered string. None if user cancelled.
    '''
    app = wx.App()
    dlg = wx.TextEntryDialog(None, msg, title, default)
    if icon:
        dlg.SetIcon(icon.GetIcon())
    if dlg.ShowModal() == wx.ID_OK:
        rc = dlg.GetValue()
        if not rc:
            rc = None
    else:
        rc = None
    dlg.Destroy()
    del app
    return rc


#-----------------------------------------------------------------------------#
#   PasswordBox                                                                #
#-----------------------------------------------------------------------------#
def PasswordBox(title, msg, icon=None):
    '''
    Returns: user-entered password. None if user cancelled.
    '''
    app = wx.App()
    dlg = wx.PasswordEntryDialog(None, msg, title, defaultValue=wx.EmptyString)
    if icon:
        dlg.SetIcon(icon.GetIcon())
    if dlg.ShowModal() == wx.ID_OK:
        rc = dlg.GetValue()
        if not rc:
            rc = None
    else:
        rc = None
    dlg.Destroy()
    del app
    return rc


#-----------------------------------------------------------------------------#
#   MultInputBox                                                               #
#-----------------------------------------------------------------------------#
def MultInputBox(title, msg_text, Label, Feld, icon=None):
    '''
    Show two lists: one with field labels and one with field contents.
    User entries will change the field contents.
    Can be used for simple data entries.
    '''
    class MyDialog(wx.Dialog):
        def __init__(self, parent=None, msg="", caption="", pos=(-1, -1), size=(500, 300),
                     style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER |
                           wx.MAXIMIZE_BOX | wx.MINIMIZE_BOX |
                           wx.FULL_REPAINT_ON_RESIZE):
            wx.Dialog.__init__(self, parent, -1, caption, pos, size, style)

    app = wx.App()
    dlg = MyDialog()
    dlg.Position = (-1, -1)
    dlg.Title = title

    msg = wx.StaticText(dlg, -1, msg_text.ljust(100, " "))
    okay = wx.Button(dlg, wx.ID_OK)                          # OK btn
    okay.SetDefault()
    cancel = wx.Button(dlg, wx.ID_CANCEL)                    # CANCEL btn

    sizer = wx.BoxSizer(wx.VERTICAL)                         # box sizer
    sizer.Add(msg, 0, wx.ALL, 5)                             # row 1 = explanation
    sizer.Add(wx.StaticLine(dlg), 0, wx.EXPAND | wx.ALL, 5)  # then a line

    num_fields = len(Feld)
    if num_fields != len(Label):
        raise ValueError("unequal number of labels and fields")
    field_lbl = list(range(num_fields))
    field_cont = list(range(num_fields))
    fgs = wx.FlexGridSizer(rows=num_fields, cols=2, hgap=5, vgap=5)
    for i in range(num_fields):
        field_lbl[i] = wx.StaticText(dlg, -1, Label[i])      # label
        field_cont[i] = wx.TextCtrl(dlg)                     # content
        field_cont[i].Value = Feld[i]                        # fill in supplied value
        fgs.Add(field_lbl[i], 0, wx.ALIGN_RIGHT)             # label right-aligned
        fgs.Add(field_cont[i], 0, wx.EXPAND)                 # expand content
    fgs.AddGrowableCol(1)
    sizer.Add(fgs, 0, wx.EXPAND | wx.ALL, 5)

    btns = wx.StdDialogButtonSizer()                         # define button sizer
    btns.AddButton(okay)
    btns.AddButton(cancel)
    btns.Realize()
    sizer.Add(btns, 0, wx.EXPAND | wx.ALL, 5)                # add btn sizer

    if icon:
        dlg.SetIcon(icon.GetIcon())
    dlg.SetSizer(sizer)
    sizer.Fit(dlg)
    dlg.Center()
    rc = dlg.ShowModal()
    if rc != wx.ID_OK:                                       # do nothing
        dlg.Destroy()
        return None
    for i in range(num_fields):                              # put inputs back
        Feld[i] = field_cont[i].Value
    dlg.Destroy()
    del app
    return True


#-----------------------------------------------------------------------------#
#   MsgBox                                                                     #
#   TODO add option to play 'beep' sound. Currently ALWAYS beeping (annoying)  #
#-----------------------------------------------------------------------------#
def MsgBox(title, msg):
    app = wx.App()
    wx.MessageBox(msg, title)
    del app
    return


#-----------------------------------------------------------------------------#
#   BusyInfo                                                                   #
#-----------------------------------------------------------------------------#
def BusyInfo(title, msg, image=None):
    '''
    Show a "busy" message. Will not block, but returns the busy object.
    Important: this will NEVER disappear - except when you delete this
    object! E.g. by setting busy = None or del busy.
    '''
    import wx.lib.agw.pybusyinfo as PBI
    app = wx.App()
    if not image:
        img = wx.NullBitmap
    elif type(image) == type(u""):
        if image.endswith(".ico"):
            icon = wx.Icon(image, wx.BITMAP_TYPE_ICO)
            img = wx.BitmapFromIcon(icon)
        else:
            img = wx.Bitmap(image, wx.BITMAP_TYPE_ANY)
    else:
        img = image.GetBitmap()
    busy = PBI.PyBusyInfo(msg, parent=None, title=title, icon=img)
    wx.Yield()
    return busy


#-----------------------------------------------------------------------------#
#   CodeBoxFF                                                                  #
#-----------------------------------------------------------------------------#
class CodeBoxFF(wx.Dialog):
    def __init__(self, parent, msg, caption, FF=True, fsize=10, icon=None,
                 pos=(-1, -1), size=(500, 300),
                 style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER |
                       wx.MAXIMIZE_BOX | wx.MINIMIZE_BOX |
                       wx.FULL_REPAINT_ON_RESIZE):
        wx.Dialog.__init__(self, parent, -1, caption, pos, size, style)
        if icon:
            self.SetIcon(icon.GetIcon())
        # always center on screen
        self.CenterOnScreen(wx.BOTH)
        self.text = text = wx.TextCtrl(self, -1, msg,
                                       style=wx.TE_MULTILINE | wx.TE_READONLY)
        # default 10-point fixed font (DejaVu Sans Mono)
        if FF:
            self.text.SetFont(wx.Font(fsize, wx.MODERN, wx.NORMAL, wx.NORMAL, 0,
                                      "DejaVu Sans Mono"))
        else:
            self.text.SetFont(wx.Font(fsize, wx.MODERN, wx.NORMAL, wx.NORMAL, 0,
                                      "Calibri"))
        ok = wx.Button(self, wx.ID_OK, "OK")
        lc = layoutf.Layoutf('t=t5#1;b=t5#2;l=l5#1;r=r5#1', (self, ok))
        text.SetConstraints(lc)
        lc = layoutf.Layoutf('b=b5#1;x%w50#1;w!80;h*', (self,))
        ok.SetConstraints(lc)
        ok.SetDefault()
        self.SetAutoLayout(1)
        self.Layout()


#-----------------------------------------------------------------------------#
#   CodeBox                                                                    #
#-----------------------------------------------------------------------------#
def ScrollingTextbox(title, text=None, filename=None, size=(800, 600), FF=True,
                     icon=None):
    '''
    Show the contents of a file or arbitrary text lines in a scrollable window.
    Argument text may be a string or a list of strings; alternatively, pass a
    file name via filename, and that file will be displayed.
    Use FF to control use of a monospaced vs. proportional font.
    '''
    app = wx.App()
    if any((text and filename, not text and not filename)):
        raise ValueError("need exactly one of text or filename")
    if isinstance(text, (list, tuple)):
        msg_d = "\n".join(text)
    elif filename:
        # display the contents of a file
        try:
            msg_d = open(filename).read()
        except Exception:
            msg_d = filename + "\ndoes not exist!"
    else:
        msg_d = text
    dlg = CodeBoxFF(None, msg_d, title, size=size, FF=FF, icon=icon)
    dlg.ShowModal()
    dlg.Destroy()
    del app
    return


# ------------------------------------------------------------------------- #
#   ProgressMeter                                                            #
# ------------------------------------------------------------------------- #
class ProgessBar:
    '''
    Display a progress meter without blocking.
    Provides early cancelling.
    '''
    def __init__(self, title, msg, maxItems, icon=None):
        self._app = wx.App()
        self._meter = wx.GenericProgressDialog(title, msg, maxItems,
                                               style=wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME |
                                                     wx.PD_AUTO_HIDE | wx.PD_REMAINING_TIME |
                                                     wx.PD_ESTIMATED_TIME)
        self.maxitems = maxItems
        self.lastitem = 0
        set_icon(self._meter, icon)

    def update(self, msg, currentItemNumber):
        if self.lastitem >= self.maxitems:
            # we have already been closed
            return False
        if currentItemNumber > self.maxitems:
            # no exception if the number is too high
            self.lastitem = self.maxitems
        else:
            self.lastitem = currentItemNumber
        keepGoing, _ = self._meter.Update(self.lastitem,
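# ---------------------------------------------------------------------------
# Usage sketch for the dialog helpers above (the excerpt cuts off mid-call).
# Assumes this module is saved as e.g. wxdlg.py; the module name is
# hypothetical.
import wxdlg

name = wxdlg.InputBox("Setup", "Enter your name:")
if name and wxdlg.YesNoBox("Confirm", "Proceed as {}?".format(name)):
    choice = wxdlg.SelectOne("Options", "Pick one:", ["alpha", "beta", "gamma"])
    wxdlg.MsgBox("Result", "You picked: {}".format(choice))
# ---------------------------------------------------------------------------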
import synapse.lib.module as s_module

contracttypes = (
    'nda',
    'other',
    'grant',
    'treaty',
    'purchase',
    'indemnity',
    'partnership',
)

class OuModule(s_module.CoreModule):

    def getModelDefs(self):
        modl = {
            'types': (
                ('ou:sic', ('str', {'regex': r'^[0-9]{4}$'}), {
                    'doc': 'The four digit Standard Industrial Classification Code.',
                    'ex': '0111',
                }),
                ('ou:naics', ('str', {'regex': r'^[1-9][0-9]{4}[0-9]?$'}), {
                    'doc': 'The five or six digit North American Industry Classification System code.',
                    'ex': '541715',
                }),
                ('ou:isic', ('str', {'regex': r'^[A-Z]([0-9]{2}[0-9]{0,2})?$'}), {
                    'doc': 'An International Standard Industrial Classification of All Economic Activities (ISIC) code.',
                    'ex': 'C1393',
                }),
                ('ou:org', ('guid', {}), {
                    'doc': 'A GUID for a human organization such as a company or military unit.',
                }),
                ('ou:orgtype', ('taxonomy', {}), {
                    'doc': 'An org type taxonomy.',
                    'interfaces': ('taxonomy',),
                }),
                ('ou:contract', ('guid', {}), {
                    'doc': 'A contract between multiple entities.',
                }),
                ('ou:conttype', ('taxonomy', {}), {
                    'doc': 'A contract type taxonomy.',
                    'interfaces': ('taxonomy',),
                }),
                ('ou:contract:type', ('str', {'enum': contracttypes}), {
                    'deprecated': True,
                    'doc': 'A pre-defined set of contract types.',
                }),
                ('ou:industry', ('guid', {}), {
                    'doc': 'An industry classification type.',
                }),
                ('ou:alias', ('str', {'lower': True, 'regex': r'^[0-9a-z_]+$'}), {
                    'doc': 'An alias for the org GUID.',
                    'ex': 'vertexproject',
                }),
                ('ou:hasalias', ('comp', {'fields': (('org', 'ou:org'), ('alias', 'ou:alias'))}), {
                    'deprecated': True,
                    'doc': 'The knowledge that an organization has an alias.',
                }),
                ('ou:orgnet4', ('comp', {'fields': (('org', 'ou:org'), ('net', 'inet:net4'))}), {
                    'doc': "An organization's IPv4 netblock.",
                }),
                ('ou:orgnet6', ('comp', {'fields': (('org', 'ou:org'), ('net', 'inet:net6'))}), {
                    'doc': "An organization's IPv6 netblock.",
                }),
                ('ou:name', ('str', {'lower': True, 'strip': True}), {
                    'doc': 'The name of an organization. This may be a formal name or informal name of the '
                           'organization.',
                    'ex': 'acme corporation',
                }),
                ('ou:member', ('comp', {'fields': (('org', 'ou:org'), ('person', 'ps:person'))}), {
                    'deprecated': True,
                    'doc': 'Deprecated. Please use ou:position.',
                }),
                ('ou:position', ('guid', {}), {
                    'doc': 'A position within an org. May be organized into an org chart.',
                }),
                ('ou:suborg', ('comp', {'fields': (('org', 'ou:org'), ('sub', 'ou:org'))}), {
                    'doc': 'Any parent/child relationship between two orgs. May represent ownership, organizational structure, etc.',
                }),
                ('ou:org:has', ('comp', {'fields': (('org', 'ou:org'), ('node', 'ndef'))}), {
                    'deprecated': True,
                    'doc': 'An org owns, controls, or has exclusive use of an object or resource, '
                           'potentially during a specific period of time.',
                }),
                ('ou:user', ('comp', {'fields': (('org', 'ou:org'), ('user', 'inet:user'))}), {
                    'doc': 'A user name within an organization.',
                }),
                ('ou:role', ('str', {'lower': True, 'regex': r'^\w+$'}), {
                    'ex': 'staff',
                    'doc': 'A named role when participating in an event.',
                }),
                ('ou:attendee', ('guid', {}), {
                    'doc': 'A node representing a person attending a meeting, conference, or event.',
                }),
                ('ou:meet', ('guid', {}), {
                    'doc': 'An informal meeting of people which has no title or sponsor. See also: ou:conference.',
                }),
                ('ou:preso', ('guid', {}), {
                    'doc': 'A webinar, conference talk, or other type of presentation.',
                }),
                ('ou:meet:attendee', ('comp', {'fields': (('meet', 'ou:meet'), ('person', 'ps:person'))}), {
                    'deprecated': True,
                    'doc': 'Deprecated. Please use ou:attendee.',
                }),
                ('ou:conference', ('guid', {}), {
                    'doc': 'A conference with a name and sponsoring org.',
                }),
                ('ou:conference:attendee', ('comp', {'fields': (('conference', 'ou:conference'), ('person', 'ps:person'))}), {
                    'deprecated': True,
                    'doc': 'Deprecated. Please use ou:attendee.',
                }),
                ('ou:conference:event', ('guid', {}), {
                    'doc': 'A conference event with a name and associated conference.',
                }),
                ('ou:conference:event:attendee', ('comp', {'fields': (('conference', 'ou:conference:event'), ('person', 'ps:person'))}), {
                    'deprecated': True,
                    'doc': 'Deprecated. Please use ou:attendee.',
                }),
                ('ou:contest', ('guid', {}), {
                    'doc': 'A competitive event resulting in a ranked set of participants.',
                }),
                ('ou:contest:result', ('comp', {'fields': (('contest', 'ou:contest'), ('participant', 'ps:contact'))}), {
                    'doc': 'The results from a single contest participant.',
                }),
                ('ou:goal', ('guid', {}), {
                    'doc': 'An assessed or stated goal which may be abstract or org specific.',
                }),
                ('ou:hasgoal', ('comp', {'fields': (('org', 'ou:org'), ('goal', 'ou:goal'))}), {
                    'doc': 'An org has an assessed or stated goal.',
                }),
                ('ou:camptype', ('taxonomy', {}), {
                    'doc': 'A campaign type taxonomy.',
                    'interfaces': ('taxonomy',),
                }),
                ('ou:campaign', ('guid', {}), {
                    'doc': "Represents an org's activity in pursuit of a goal.",
                }),
                ('ou:id:type', ('guid', {}), {
                    'doc': 'A type of id number issued by an org.',
                }),
                ('ou:id:value', ('str', {'strip': True}), {
                    'doc': 'The value of an ou:id:number.',
                }),
                ('ou:id:number', ('comp', {'fields': (('type', 'ou:id:type'), ('value', 'ou:id:value'))}), {
                    'doc': 'A unique id number issued by a specific organization.',
                }),
                ('ou:id:update', ('guid', {}), {
                    'doc': 'A status update to an ou:id:number.',
                }),
                ('ou:award', ('guid', {}), {
                    'doc': 'An award issued by an organization.',
                }),
                ('ou:vitals', ('guid', {}), {
                    'doc': 'Vital statistics about an org for a given time period.',
                }),
                ('ou:opening', ('guid', {}), {
                    'doc': 'A job/work opening within an org.',
                }),
                ('ou:jobtype', ('taxonomy', {}), {
                    'ex': 'it.dev.python',
                    'doc': 'A job type taxonomy.',
                    'interfaces': ('taxonomy',),
                }),
                ('ou:employment', ('taxonomy', {}), {
                    'ex': 'fulltime.salary',
                    'doc': 'An employment type taxonomy.',
                    'interfaces': ('taxonomy',),
                }),
                ('ou:jobtitle', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
                    'doc': 'A title for a position within an org.',
                }),
            ),

            'forms': (
                ('ou:jobtype', {}, ()),
                ('ou:jobtitle', {}, ()),
                ('ou:employment', {}, ()),
                ('ou:opening', {}, (
                    ('org', ('ou:org', {}), {
                        'doc': 'The org which has the opening.',
                    }),
                    ('orgname', ('ou:name', {}), {
                        'doc': 'The name of the organization as listed in the opening.',
                    }),
                    ('orgfqdn', ('inet:fqdn', {}), {
                        'doc': 'The FQDN of the organization as listed in the opening.',
                    }),
                    ('posted', ('time', {}), {
                        'doc': 'The date/time that the job opening was posted.',
                    }),
                    ('removed', ('time', {}), {
                        'doc': 'The date/time that the job opening was removed.',
                    }),
                    ('postings', ('array', {'type': 'inet:url', 'uniq': True, 'sorted': True}), {
                        'doc': 'URLs where the opening is listed.',
                    }),
                    ('contact', ('ps:contact', {}), {
                        'doc': 'The contact details to inquire about the opening.',
                    }),
                    ('loc', ('loc', {}), {
                        'doc': 'The geopolitical boundary of the opening.',
                    }),
                    ('jobtype', ('ou:jobtype', {}), {
                        'doc': 'The job type taxonomy.',
                    }),
                    ('employment', ('ou:employment', {}), {
                        'doc': 'The type of employment.',
                    }),
                    ('jobtitle', ('ou:jobtitle', {}), {
                        'doc': 'The title of the opening.',
                    }),
                    ('remote', ('bool', {}), {
                        'doc': 'Set to true if the opening will allow a fully remote worker.',
                    }),
                    ('yearlypay', ('econ:price', {}), {
                        'doc': 'The yearly income associated with the opening.',
                    }),
                    ('paycurrency', ('econ:currency', {}), {
                        'doc': 'The currency that the yearly pay was delivered in.',
                    }),
                    # TODO a way to encode/normalize requirements.
                )),
                ('ou:vitals', {}, (
                    ('asof', ('time', {}), {
                        'doc': 'The time that the vitals represent.',
                    }),
                    # TODO is modulo time a type?
                    # ('period', ('sec', 'min', 'hour', 'day', 'week', 'month', 'quarter', 'year'
                    ('org', ('ou:org', {}), {
                        'doc': 'The resolved org.',
                    }),
                    ('orgname', ('ou:name', {}), {
                        'doc': 'The org name as reported by the source of the vitals.',
                    }),
                    ('orgfqdn', ('inet:fqdn', {}), {
                        'doc': 'The org FQDN as reported by the source of the vitals.',
                    }),
                    ('currency', ('econ:currency', {}), {
                        'doc': 'The currency of the econ:price values.',
                    }),
                    ('costs', ('econ:price', {}), {
                        'doc': 'The costs/expenditures over the period.',
                    }),
                    ('revenue', ('econ:price', {}), {
                        'doc': 'The gross revenue over the period.',
                    }),
                    ('profit', ('econ:price', {}), {
                        'doc': 'The net profit over the period.',
                    }),
                    ('valuation', ('econ:price', {}), {
                        'doc': 'The assessed value of the org.',
                    }),
                    ('shares', ('int', {}), {
                        'doc': 'The number of shares outstanding.',
                    }),
                    ('population', ('int', {}), {
                        'doc': 'The population of the org.',
                    }),
                    ('delta:costs', ('econ:price', {}), {
                        'doc': 'The change in costs over the last period.',
                    }),
                    ('delta:revenue', ('econ:price', {}), {
                        'doc': 'The change in revenue over the last period.',
                    }),
                    ('delta:profit', ('econ:price', {}), {
                        'doc': 'The change in profit over the last period.',
                    }),
                    ('delta:valuation', ('econ:price', {}), {
                        'doc': 'The change in valuation over the last period.',
                    }),
                    ('delta:population', ('int', {}), {
                        'doc': 'The change in population over the last period.',
                    }),
                )),
                ('ou:award', {}, (
                    ('name', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
                        'doc': 'The name of the award.',
                        'ex': 'Bachelors of Science',
                    }),
                    ('type', ('str', {'lower': True, 'strip': True, 'onespace': True}), {
                        'doc': 'The type of award.',
                        'ex': 'certification',
                    }),
                    ('org', ('ou:org', {}), {
                        'doc': 'The organization which issues the award.',
                    }),
                )),
                ('ou:id:type', {}, (
                    ('org', ('ou:org', {}), {
                        'doc': 'The org which issues id numbers of this type.',
                    }),
                    ('name', ('str', {}), {
                        'doc': 'The friendly name of the id number type.',
                    }),
                )),
                ('ou:id:number', {}, (
                    ('type', ('ou:id:type', {}), {
                        'doc': 'The
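# ---------------------------------------------------------------------------
# Quick sanity check (outside Synapse) of the classification-code regexes
# declared in the model above, using only the standard `re` module. The
# sample values are the 'ex' examples from the model.
import re

patterns = {
    'ou:sic':   r'^[0-9]{4}$',
    'ou:naics': r'^[1-9][0-9]{4}[0-9]?$',
    'ou:isic':  r'^[A-Z]([0-9]{2}[0-9]{0,2})?$',
}
samples = {'ou:sic': '0111', 'ou:naics': '541715', 'ou:isic': 'C1393'}

for name, value in samples.items():
    assert re.match(patterns[name], value), '{} failed {}'.format(value, name)
    print('{}: {} is valid'.format(name, value))
# ---------------------------------------------------------------------------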
self.x
        if self.X.shape[1] <= self.rc:
            # we have to expand X with zero columns
            # Y = np.zeros((self.X.shape[0], self.rc - self.X.shape[1] + 1))
            # self.X = np.c_[self.X, Y]
            dx = self.rc - self.X.shape[1] + 1
            self.X = np.pad(self.X, ((0, 0), (0, dx)), "constant")
        self.X[:, self.rc] = self.x
        self.na = self.X.shape[1]

        if False:  # never applies before many stucks
            # check hull
            xinf = self.X.min(axis=1)
            xsup = self.X.max(axis=1)
            qhull = max((xsup - xinf) / (abs(xsup) + abs(xinf) + realmin))
            if qhull < 1e-8:
                # affine restart
                self.X = self.xbest[:, np.newaxis]
                self.F = np.array([[self.fbest]])

        # update row and column rc of function matrix
        if self.F.shape[0] < self.rc + 1 or self.F.shape[1] < self.rc + 1:
            self.F = np.pad(self.F, ((0, self.rc + 1 - self.F.shape[0]),
                                     (0, self.rc + 1 - self.F.shape[1])), "constant")
        self.F[self.rc, self.rc] = self.f
        for iu in range(self.na):
            if iu == self.rc:
                continue
            self.x = (self.xrc + self.X[:, iu]) / 2
            # evaluate function
            self.vxeval()
            if self.f < self.fbest:
                # update best point information
                self.nfbest = self.nfused
                self.xbest = self.x
                self.fbest = self.f
                self.log.debug('improvement in pair step')
            self.F[self.rc, iu] = self.f
            self.F[iu, self.rc] = self.f

        # now f(Xs) = (2*s'*F - diag(F)') * s  if sum(s) == 1
        # find index of best basis point x_rc
        self.d = d = self.F.diagonal()  # np.diag(F)
        self.rc = np.argmin(d)
        self.frc = d[self.rc]

        # toggle direction type to be tried
        trydirmax = False
        if self.trydir is None:
            self.trydir = 0
        else:
            self.trydir += 1
            if self.trydir > trydirmax:
                self.trydir = 0
        if self.trydir == 0:
            self.vxnewton()  # safeguarded Newton direction
        else:
            self.vxcov()  # covariance matrix based direction

        # search direction
        p = self.X.dot(self.s) - self.xbest  # s from vxnewton
        if np.all(p == 0):
            return

        # line search
        self.vxline(p, self.alp, keepalp=False)
        if self.done:
            return

    def vxnewton(self):
        """Find safeguarded Newton direction."""
        # negative gradient at best basis point x_rc
        b = self.d / 4 - self.F[:, self.rc]
        # steepest descent direction
        sS = b - np.mean(b)
        sS = sS / (LA.norm(sS, np.inf) + realmin)

        # Newton direction (is a good spanning direction even when singular)
        # warning off
        # ona = np.ones((self.na, 1))
        delta = LA.norm(self.d - self.frc, np.inf)
        # delta = delta[ona, 1]
        delta = np.repeat(delta, self.na).reshape(self.na, -1)
        # sN = [F-frc delta; delta' 0] \ [b; 0]'
        sN_A = np.vstack((
            np.hstack((self.F - self.frc, delta)),
            # instead of delta.T, make it a 1d horizontal vector!
            np.hstack((delta.reshape(-1), 0))
        ))
        try:
            sN = LA.solve(sN_A, np.r_[b, 0])
        except LA.LinAlgError:
            sN = np.ones((b.shape[0] + 1,)) * np.inf
        sN = sN[:-1]  # remove last column
        sN = sN - np.mean(sN)
        sN = sN / LA.norm(sN, np.inf)
        # warning on

        if np.all(np.isfinite(sN)):
            # minimize in the 2D subspace spanned by sS and sN
            # f(xnew + Q t) = fnew - 4 c2^T t + 2 t^T G2 t
            Q = np.vstack((sS, sN))
            c2 = Q.dot(b)
            G2 = Q.dot(self.F).dot(Q.T)
            t, self.nalp = self.vx2quad(c2, G2, self.n * self.defacc)
            s = Q.T.dot(t)
        else:
            # unstable Newton direction, use only steepest descent
            s = sS

        # shift to affine coordinates
        s[self.rc] = s[self.rc] + 1
        self.s = s
        self.falp = np.nan  # function value at new point unknown
        return s

    def vx2quad(self, c, G, delta):
        """
        Minimizes the 2D problem f(x) = -2 c^T x + x^T G x with safeguards in
        the nearly indefinite case. delta >= 0 is a definiteness threshold.
        The optimum is at x = 2^nalp * t, where t in [-1,1]^2 and nalp = 1
        iff either G is nearly indefinite, or the definite optimizer is in
        [-1,1]^2.
        :param c:
        :param G:
        :param delta:
        :return:
        """
        definite = False
        if G[0, 0] > 0:
            detG = G[0, 0] * G[1, 1] - G[0, 1] ** 2
            if detG > delta * G[0, 1] ** 2:
                # G is sufficiently positive definite
                definite = True
                # try unconstrained minimizer by Cramer's rule
                t = np.array([c[0] * G[1, 1] - c[1] * G[1, 0],
                              G[0, 0] * c[1] - G[0, 1] * c[0]])
                self.t = t / detG
                tn = LA.norm(t, np.inf)
                if not np.isfinite(tn):  # 111
                    # no definite direction found
                    self.log.debug('definite, but no definite direction found')
                    definite = False
                elif tn <= 2:  # 222
                    self.nalp = 1
                else:  # 333
                    # rescale direction
                    self.nalp = np.ceil(np.log2(tn))
                    t = t * 2 ** (1 - self.nalp)
                    self.f2 = t.dot(G.dot(t) / 2 - c)

        if not definite:  # 444
            # G is not positive definite
            # find minimum constrained to the box [-1,1]^2
            f2 = 0  # objective value at t=0
            for k in [0, 1]:
                i = 1 - k
                for tk in [-1, 1]:
                    ci = c[i] - tk * G[i, k]
                    Gii = G[i, i]
                    # minimize -ci ti + Gii ti^2/2  s.t. ti in [-1,1]
                    if Gii > np.abs(ci):
                        ti = ci / Gii
                        fti = -ci * ti / 2
                    elif ci >= 0:
                        ti = 1
                        fti = -ci + Gii / 2
                    else:
                        ti = -1
                        fti = ci + Gii / 2
                    if fti < f2:
                        f2 = fti
                        self.t = np.zeros((2, 1))
                        self.t[i, 0] = ti
                        self.t[k, 0] = tk
            if f2 >= 0:
                # no good direction found
                self.log.info('bad fti: G:%s, c:%s' % (G, c))
                self.t = np.array([[1], [1]])
                self.nalp = 1
                return self.t, self.nalp
            self.nalp = 10

        return self.t, self.nalp

    def vxcov(self):
        raise Exception("does not exist")

    def vxgline(self, coord):
        """
        Global line search decreasing fcn(xbest + alp*p).

        coord: k for coordinate search in direction k,
               `None` for random direction search
        """
        self.nalp = 10  # number of uniform points tried (>=4)
        # the random scale recipe can be tuned as well

        ub = self.upper_bounds
        lb = self.lower_bounds

        if coord is not None and coord >= 0:
            # coordinate direction
            pp = np.zeros((self.n, 1))
            pp[coord] = ub[coord] - lb[coord]
            ind = np.all(np.isfinite(pp))
            pp[ind] = 1
        else:
            # random search direction
            pp = 2 * (self.vxrand(lb, ub) - self.xbest)

        # convert n x 1 vectors to an n-vector
        pp = pp.ravel()

        # search range alp in [-1, 1]
        n2 = int(np.fix(self.nalp / 2))
        self.nalp = int(2 * n2 + 1)

        # global grid search
        glgood = 0
        for rep in range(10):
            fgline = self.fbest
            # random scale
            r = np.random.random() ** 2
            p = pp * r
            asorted = (np.arange(-n2, n2 + 1) +
                       0.8 * np.random.random(self.nalp) - 0.4) / n2
            asorted[n2 + 1] = 0
            fsorted = np.inf * asorted
            x0 = self.xbest
            for kk in range(self.nalp):
                alp = asorted[kk]
                if alp == 0:
                    fsorted[kk] = self.fbest
                    continue
                # function evaluation and list management
                self.x = x0 + asorted[kk] * p
                self.vxeval()  # evaluate f=f(x) and keep books
                if self.done:
                    break
                fsorted[kk] = self.f
                if self.f < self.fbest:
                    # update best point information
                    self.fbest = self.f
                    self.xbest = self.x
                    self.nfbest = self.nfused
            kbest = np.argmin(fsorted)
            ffbest = fsorted[kbest]
            if self.fbest < fgline:
                # best point moved on the grid
                glgood = glgood + 1
            else:
                break

        # now the best grid point is at alp=0
        bracket = 1
        nblist = 0
        blist = []
        fblist = []
        for kk in range(2, self.nalp - 2):
            f0 = fsorted[kk]
            f1 = fsorted[kk - 1]
            f2 = fsorted[kk + 1]
            if f0 > min(f1, f2):
                # not a local minimizer, do nothing
                continue
            # safeguarded quadratic interpolation step
            a00 = asorted[kk]
            a1 = asorted[kk - 1] - a00
            a2 = asorted[kk + 1] - a00
            self.vxquad()
            anew = [int(alp + a00)]
            # piecewise linear interpolation steps
            kink = -1
            kink = self.vxkink(kink, kk, asorted, fsorted)
            if kink:
                anew.append(self.alp)
            kink = +1
            kink = self.vxkink(kink, kk, asorted, fsorted)
            if kink:
                anew.append(self.alp)
            for alp in anew:
                # function evaluation and list management
                self.x = x0 + np.dot(alp, p)
                self.vxeval()  # evaluate f=f(x) and keep books
                if self.done:
                    break
                if self.f < self.fbest:
                    # update best point information
                    self.fbest = self.f
                    self.xbest = self.x
                    self.nfbest = self.nfused
                # nblist = nblist + 1
                blist.append(alp)
                fblist.append(self.f)

        if glgood > 0:
            self.trygline
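# ---------------------------------------------------------------------------
# Standalone sketch of the unconstrained step inside vx2quad above: for a
# 2x2 positive definite G, minimizing f(t) = -2 c^T t + t^T G t means solving
# G t = c (gradient -2c + 2Gt = 0), which the method does via Cramer's rule.
# The numbers below are illustrative.
import numpy as np

c = np.array([1.0, 2.0])
G = np.array([[4.0, 1.0],
              [1.0, 3.0]])

detG = G[0, 0] * G[1, 1] - G[0, 1] ** 2
t = np.array([c[0] * G[1, 1] - c[1] * G[1, 0],
              G[0, 0] * c[1] - G[0, 1] * c[0]]) / detG

assert np.allclose(t, np.linalg.solve(G, c))  # cross-check against a direct solve
print(t)  # optimizer of the 2D quadratic
# ---------------------------------------------------------------------------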
import unittest
from math import pi

import numpy as np

from wisdem.ccblade.Polar import Polar, blend


class TestBlend(unittest.TestCase):
    def setUp(self):
        alpha = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09, 5.11,
                 6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19, 13.18, 14.18,
                 15.18, 16.17, 17.14, 18.06, 19.06, 20.07, 25]
        cl = [-0.071, 0.044, 0.144, 0.241, 0.338, 0.435, 0.535, 0.632, 0.728,
              0.813, 0.883, 0.946, 1.001, 1.054, 1.056, 1.095, 1.138, 1.114,
              1.073, 1.008, 0.95, 0.902, 0.795, 0.797, 0.8]
        cd = [0.0122, 0.0106, 0.0114, 0.0134, 0.0136, 0.014, 0.0147, 0.0156,
              0.0162, 0.0173, 0.0191, 0.0215, 0.0248, 0.0339, 0.0544, 0.0452,
              0.0445, 0.067, 0.0748, 0.1028, 0.1473, 0.2819, 0.2819, 0.2819,
              0.3]
        cm = [-0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346, -0.0405,
              -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284, -0.0322,
              -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242, -0.1155,
              -0.1068, -0.0981, -0.0894, -0.0807]
        Re = 1

        self.polar1 = Polar(Re, alpha, cl, cd, cm)

        alpha = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09, 5.11,
                 6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19, 13.18, 14.18,
                 15.189, 16.17, 17.14, 18.06, 19.06, 20.07, 21.08, 22.09,
                 23.1, 25]
        cl = [-0.0852, 0.0528, 0.1728, 0.2892, 0.4056, 0.522, 0.642, 0.7584,
              0.8736, 0.9756, 1.0596, 1.1352, 1.2012, 1.2648, 1.2672, 1.314,
              1.3656, 1.3368, 1.2876, 1.2096, 1.14, 1.0824, 0.954, 0.9564,
              1, 1.2, 1.4, 1.6]
        cd = [0.01464, 0.01272, 0.01368, 0.01608, 0.01632, 0.0168, 0.01764,
              0.01872, 0.01944, 0.02076, 0.02292, 0.0258, 0.02976, 0.04068,
              0.06528, 0.05424, 0.0534, 0.0804, 0.08976, 0.12336, 0.17676,
              0.33828, 0.33828, 0.33828, 0.35, 0.4, 0.45, 0.5]
        cm = [-0.0037, -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346,
              -0.0405, -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284,
              -0.0322, -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242,
              -0.1155, -0.1068, -0.0981, -0.0894, -0.0807, -0.072, -0.0633]

        self.polar2 = Polar(Re, alpha, cl, cd, cm)

    def test_blend1(self):
        polar3 = blend(self.polar1, self.polar2, 0.5)

        alpha_blend = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09,
                       5.11, 6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19,
                       13.18, 14.18, 15.18, 16.17, 17.14, 18.06, 19.06,
                       20.07, 25]
        cl_blend = [-0.078, 0.048, 0.158, 0.265, 0.372, 0.479, 0.589, 0.695,
                    0.801, 0.894, 0.971, 1.041, 1.101, 1.159, 1.162, 1.205,
                    1.252, 1.225, 1.181, 1.109, 1.045, 0.992, 0.875, 0.877,
                    1.200]
        cd_blend = [0.0134, 0.0117, 0.0125, 0.0147, 0.0150, 0.0154, 0.0162,
                    0.0172, 0.0178, 0.0190, 0.0210, 0.0237, 0.0273, 0.0373,
                    0.0598, 0.0497, 0.0490, 0.0737, 0.0822, 0.1131, 0.1620,
                    0.3101, 0.3101, 0.3101, 0.4000]
        cm_blend = [-0.00405, -0.00475, -0.00165, -0.0099, -0.0249, -0.0314,
                    -0.03755, -0.043, -0.0481, -0.04555, -0.03625, -0.0301,
                    -0.02825, -0.0303, -0.03415, -0.0362, -0.0378, -0.03955,
                    -0.06905, -0.11125, -0.11985, -0.11115, -0.10245,
                    -0.09375, -0.072]

        # re-interpolate b/c angles of attack are different
        cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
        cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
        cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)

        # should be within 1e-3
        np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
        np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
        np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)

    def test_blend2(self):
        polar3 = blend(self.polar1, self.polar2, 0.7)

        alpha_blend = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09,
                       5.11, 6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19,
                       13.18, 14.18, 15.18, 16.17, 17.14, 18.06, 19.06,
                       20.07, 25]
        cl_blend = [-0.081, 0.050, 0.164,
                    0.275, 0.385, 0.496, 0.610, 0.720, 0.830, 0.927, 1.007,
                    1.078, 1.141, 1.202, 1.204, 1.248, 1.297, 1.270, 1.224,
                    1.149, 1.083, 1.028, 0.906, 0.909, 1.360]
        cd_blend = [0.0139, 0.0121, 0.0130, 0.0153, 0.0155, 0.0160, 0.0168,
                    0.0178, 0.0185, 0.0197, 0.0218, 0.0245, 0.0283, 0.0386,
                    0.0620, 0.0515, 0.0507, 0.0764, 0.0852, 0.1172, 0.1679,
                    0.3214, 0.3214, 0.3214, 0.4400]
        cm_blend = [-0.00391, -0.00461, -0.00303, -0.00522, -0.02358,
                    -0.03012, -0.03637, -0.042, -0.04706, -0.04761, -0.03791,
                    -0.0309, -0.02819, -0.02954, -0.03337, -0.03616, -0.0372,
                    -0.03945, -0.057347, -0.10607, -0.12159, -0.11289,
                    -0.10419, -0.09549, -0.06852]

        # re-interpolate b/c angles of attack are different
        cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
        cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
        cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)

        # should be within 1e-3
        np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
        np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
        np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)

    def test_blend3(self):
        polar3 = blend(self.polar1, self.polar2, 0.2)

        alpha_blend = [-3.04, -2.03, -1.01, 0.01, 1.03, 2.05, 3.07, 4.09,
                       5.11, 6.13, 7.14, 8.16, 9.17, 10.18, 11.18, 12.19,
                       13.18, 14.18, 15.18, 16.17, 17.14, 18.06, 19.06,
                       20.07, 25]
        cl_blend = [-0.074, 0.046, 0.150, 0.251, 0.352, 0.452, 0.556, 0.657,
                    0.757, 0.846, 0.918, 0.984, 1.041, 1.096, 1.098, 1.139,
                    1.184, 1.159, 1.116, 1.048, 0.988, 0.938, 0.827, 0.829,
                    0.960]
        cd_blend = [0.0127, 0.0110, 0.0119, 0.0139, 0.0141, 0.0146, 0.0153,
                    0.0162, 0.0168, 0.0180, 0.0199, 0.0224, 0.0258, 0.0353,
                    0.0566, 0.0470, 0.0463, 0.0697, 0.0778, 0.1069, 0.1532,
                    0.2932, 0.2932, 0.2932, 0.3400]
        cm_blend = [-0.00426, -0.00496, 0.00042, -0.01692, -0.02688,
                    -0.03332, -0.03932, -0.0445, -0.04966, -0.04246,
                    -0.03376, -0.0289, -0.02834, -0.03144, -0.03532,
                    -0.03626, -0.0387, -0.0397, -0.0866, -0.11902, -0.11724,
                    -0.10854, -0.09984, -0.09114, -0.07722]

        # re-interpolate b/c angles of attack are different
        cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
        cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
        cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)

        # should be within 1e-3
        np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
        np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
        np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)


class Test3DStall(unittest.TestCase):
    def setUp(self):
        alpha = [-9.000, -8.000, -7.000, -6.000, -5.000, -4.000, -3.000,
                 -2.000, -1.000, 0.000, 1.000, 2.000, 3.000, 4.000, 5.000,
                 6.000, 7.000, 8.000, 9.000, 10.000, 11.000, 12.000, 13.000,
                 14.000, 15.000, 16.000, 17.000, 18.000, 19.000, 20.000,
                 30.000, 40.000, 50.000]
        cl = [-0.802, -0.721, -0.611, -0.506, -0.408, -0.313, -0.220,
              -0.133, -0.060, 0.036, 0.227, 0.342, 0.436, 0.556, 0.692,
              0.715, 0.761, 0.830, 0.893, 0.954, 1.013, 1.042, 1.061, 1.083,
              1.078, 0.882, 0.811, 0.793, 0.793, 0.798, 0.772, 0.757, 0.700]
        cd = [0.027, 0.025, 0.024, 0.023, 0.022, 0.022, 0.023, 0.025, 0.027,
              0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019, 0.021, 0.024,
              0.027, 0.031, 0.037, 0.046, 0.058, 0.074, 0.088, 0.101, 0.114,
              0.128, 0.142, 0.155, 0.321, 0.525, 0.742]
        cm = [-0.0037, -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346,
              -0.0405, -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284,
              -0.0322, -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242,
              -0.1155, -0.1068, -0.0981, -0.0894, -0.0807, -0.072, -0.0633,
              -0.054, -0.045, -0.036, -0.22, -0.13]
        cm_zeros = np.zeros(len(cm))
        Re = 1

        self.polar = Polar(Re, alpha, cl, cd, cm)
        self.polar2 = Polar(Re,
                            alpha, cl, cd, cm_zeros)

    def test_stall1(self):
        R = 2.4
        r = 0.25 * R
        chord = 0.18
        Omega = 200 * pi / 30
        Uinf = 10.0
        tsr = Omega * R / Uinf

        newpolar = self.polar.correction3D(
            r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
        )

        cl_3d = [-0.84628298, -0.75228154, -0.64170322, -0.53398298,
                 -0.43026406, -0.32825998, -0.22739914, -0.12996799,
                 -0.04053948, 0.06203622, 0.21891545, 0.33235184,
                 0.4337843, 0.55007878, 0.67551912, 0.73636683, 0.81036171,
                 0.89750377, 0.98121612, 1.06378525, 1.14521114, 1.20948854,
                 1.26804979, 1.32832588, 1.328, 1.132, 1.061, 1.043, 1.043,
                 1.048, 0.9595, 0.8195, 0.7]
        cd_3d = [0.027, 0.025, 0.024, 0.023, 0.022, 0.022, 0.023, 0.025,
                 0.027, 0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019,
                 0.021, 0.024, 0.027, 0.031, 0.037, 0.046, 0.058, 0.074,
                 0.088, 0.101, 0.114, 0.128, 0.142, 0.155, 0.321, 0.525,
                 0.742]

        # test equality
        np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3, rtol=1e-3)
        np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3, rtol=1e-3)

    def test_stall2(self):
        R = 2.4
        r = 0.75 * R
        chord = 0.28
        Omega = 200 * pi / 30
        Uinf = 14.0
        tsr = Omega * R / Uinf

        newpolar = self.polar.correction3D(
            r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
        )

        cl_3d = [-0.81312305, -0.72885733, -0.61871207, -0.5130288,
                 -0.41359231, -0.31683302, -0.22185852, -0.13223842,
                 -0.05511188, 0.04253981, 0.22496931, 0.33957657,
                 0.43544346, 0.5545127, 0.68786031, 0.72036695, 0.77339873,
                 0.84695567, 0.91515823, 0.98157599, 1.04620895, 1.08406997,
                 1.113007, 1.14462124, 1.15214072, 0.98921218, 0.93783339,
                 0.9337517, 0.94573318, 0.96217664, 0.9595, 0.8195, 0.7]
        cd_3d = [0.027, 0.025, 0.024, 0.023, 0.022, 0.022, 0.023, 0.025,
                 0.027, 0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019,
                 0.021, 0.024, 0.027, 0.031, 0.037, 0.046, 0.058, 0.074,
                 0.088, 0.101, 0.114, 0.128, 0.142, 0.155, 0.321, 0.525,
                 0.742]

        # test equality
        np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
        np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)

    def test_stall3(self):
        R = 5.0
        r = 0.5 * R
        chord = 0.5
        Omega = 100 * pi / 30
        Uinf = 10.0
        tsr = Omega * R / Uinf

        newpolar = self.polar.correction3D(
            r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
        )

        cl_3d = [-0.82374342, -0.73635957, -0.62607561, -0.51973994,
                 -0.41893189, -0.32049281, -0.22363306, -0.13151125,
                 -0.05044467, 0.04878406, 0.2230304, 0.33726265, 0.43491207,
                 0.55309262, 0.68390771, 0.72549134, 0.78523713, 0.86314507,
                 0.93631506, 1.00790573, 1.07791708, 1.12423867, 1.16266366,
                 1.20345763, 1.22293081, 1.09157913, 1.05893482, 1.043,
                 1.043, 1.048, 0.9595, 0.8195, 0.7]
        cd_3d = [0.027, 0.025, 0.024,
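# ---------------------------------------------------------------------------
# Hedged sketch of the linear blending exercised by TestBlend above: for a
# blend weight w, each coefficient becomes (1 - w)*polar1 + w*polar2 after
# interpolating both polars onto a common angle-of-attack grid. The real
# wisdem `blend` also handles grid unions and edge cases; this is
# illustrative only.
import numpy as np

def blend_coeff(alpha1, c1, alpha2, c2, w):
    alpha = np.union1d(alpha1, alpha2)      # common alpha grid
    c1i = np.interp(alpha, alpha1, c1)      # re-interpolate polar 1
    c2i = np.interp(alpha, alpha2, c2)      # re-interpolate polar 2
    return alpha, (1 - w) * c1i + w * c2i   # weighted average

# First two cl points of the fixtures above, at w=0.5:
alpha, cl = blend_coeff([-3.04, -2.03], [-0.071, 0.044],
                        [-3.04, -2.03], [-0.0852, 0.0528], 0.5)
print(cl)  # approx [-0.078, 0.048], matching test_blend1
# ---------------------------------------------------------------------------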
import networkx as nx
import matplotlib.pyplot as plt


class Population:
    def __init__(self, name, n, e, w):
        """
        :param name: population name
        :param n: # of neurons (estimated from density and cortical area from Markov)
        :param e: # of extrinsic inputs per neuron (typical values by cortical layer)
        :param w: receptive field width in deg visual angle (various sources)
        """
        self.name = name
        self.n = n
        self.e = e
        self.w = w

    def get_description(self):
        return "{} (#neurons={}; in-degree={}; RF-width={})".format(self.name, self.n, self.e, self.w)

    def is_input(self):
        """
        :return: True if this population is an input to the model (this is true if the
            # of extrinsic inputs per neuron is 0); False otherwise
        """
        return not self.e


# TODO: do I really need subclasses or can I have f and b, possibly None, f ignored if b not None?
class Projection:
    def __init__(self, origin, termination):
        """
        :param origin: presynaptic Population
        :param termination: postsynaptic Population
        """
        self.origin = origin
        self.termination = termination

    def get_description(self):
        return "{}->{}".format(self.origin.name, self.termination.name)


class InterAreaProjection(Projection):
    def __init__(self, origin, termination, f):
        """
        :param origin: presynaptic Population
        :param termination: postsynaptic Population
        :param f: fraction of all neurons that project to termination that are from origin
            (from Markov et al., 2012)
        """
        Projection.__init__(self, origin, termination)
        self.f = f

    def get_description(self):
        return "{} (FLNe={})".format(super().get_description(), self.f)


class InterLaminarProjection(Projection):
    def __init__(self, origin, termination, b):
        """
        :param origin: presynaptic Population
        :param termination: postsynaptic Population
        :param b: mean number of synapses onto a single postsynaptic neuron from the
            presynaptic neuron population (b is for Binzegger, since we take this from
            Binzegger et al., 2004, Figures 7 and 8)
        """
        Projection.__init__(self, origin, termination)
        self.b = b

    def get_description(self):
        return "{} (synapses-per-target={})".format(Projection.get_description(self), self.b)


class System:
    def __init__(self, min_f=1e-6):
        self.min_f = min_f
        self.input_name = "INPUT"
        self.populations = []
        self.projections = []

    def add_input(self, n, w):
        """
        Adds a special population that represents the network input. If a parameter
        value is unknown, it should be given as None.
        :param n: number of units
        :param w: width of an image pixel in degrees visual angle
        """
        self.populations.append(Population(self.input_name, n, None, w))

    def add(self, name, n, e, w):
        if self.find_population(name) is not None:
            raise Exception(name + " already exists in network")
        self.populations.append(Population(name, n, e, w))

    def connect_areas(self, origin_name, termination_name, f):
        origin = self.find_population(origin_name)
        termination = self.find_population(termination_name)
        if origin is None:
            raise Exception(origin_name + " is not in the system")
        if termination is None:
            raise Exception(termination_name + " is not in the system")
        if f >= self.min_f:
            self.projections.append(InterAreaProjection(origin, termination, f))
        else:
            print("Omitting connection {}->{} with f={}".format(origin_name, termination_name, f))

    def connect_layers(self, origin_name, termination_name, b):
        origin = self.find_population(origin_name)
        termination = self.find_population(termination_name)
        if origin is None:
            raise Exception(origin_name + " is not in the system")
        if termination is None:
            raise Exception(termination_name + " is not in the system")
        self.projections.append(InterLaminarProjection(origin, termination, b))

    def find_population(self, name):
        assert isinstance(name, str)
        result = None
        for population in self.populations:
            if population.name == name:
                result = population
                break
        return result

    def find_population_index(self, name):
        assert isinstance(name, str)
        result = None
        for i in range(len(self.populations)):
            if self.populations[i].name == name:
                result = i
                break
        return result

    def find_projection(self, origin_name, termination_name):
        assert isinstance(origin_name, str)
        assert isinstance(termination_name, str)
        result = None
        for projection in self.projections:
            if projection.origin.name == origin_name and projection.termination.name == termination_name:
                result = projection
                break
        return result

    def find_projection_index(self, origin_name, termination_name):
        assert isinstance(termination_name, str)
        result = None
        for i in range(len(self.projections)):
            projection = self.projections[i]
            if projection.origin.name == origin_name and projection.termination.name == termination_name:
                result = i
                break
        return result

    def find_pre(self, termination_name):
        assert isinstance(termination_name, str)
        result = []
        for projection in self.projections:
            if projection.termination.name == termination_name:
                result.append(projection.origin)
        return result

    def normalize_FLNe(self):
        """
        The fraction of extrinsic labelled neurons per source area is determined from
        tract-tracing data. However, if a System does not contain all connections in
        the brain, the sum of these fractions will be <1. This method rescales the
        fractions from the literature to fractions within the model.
        """
        for population in self.populations:
            total_FLNe = 0
            for pre in self.find_pre(population.name):
                projection = self.find_projection(pre.name, population.name)
                if isinstance(projection, InterAreaProjection):
                    total_FLNe += projection.f
            for pre in self.find_pre(population.name):
                projection = self.find_projection(pre.name, population.name)
                if isinstance(projection, InterAreaProjection):
                    projection.f = projection.f / total_FLNe
            # Here we could reduce # extrinsic inputs to account for ignored FLNe, like this:
            # population.e = population.e * total_FLNe.
            # However, the ignored FLNe are mostly due to omission of feedback and lateral
            # connections rather than areas outside the model, and most of these are not
            # onto L4, which is the layer getting extrinsic input in the feedforward model.
            # Ideally we would account for L4 inputs due to lateral connections and areas
            # outside the model, but we ignore these for simplicity.

    def prune_FLNe(self, min_fraction):
        """
        Removes projections that have FLNe less than min_fraction.

        :param min_fraction: Minimum FLNe of projections to keep in the model.
        """
        def keep(projection):
            if isinstance(projection, InterAreaProjection) and projection.f < min_fraction:
                print("Pruning sparse projection {}->{} {}".format(
                    projection.origin.name, projection.termination.name, projection.f))
                return False
            else:
                return True

        self.projections = [p for p in self.projections if keep(p)]

    def make_graph(self):
        graph = nx.DiGraph()
        for population in self.populations:
            graph.add_node(population.name)
        for projection in self.projections:
            graph.add_edge(projection.origin.name, projection.termination.name)
        return graph

    def print_description(self):
        for population in self.populations:
            print(population.get_description())
        for projection in self.projections:
            print(projection.get_description())

    def check_connected(self, input_indices=[0]):
        """
        Checks that all populations in the system, except for identified input
        populations, have at least one input.
        """
        for i in range(len(self.populations)):
            pop = self.populations[i]
            # print('{}: {}'.format(pop.name, [pre.name for pre in self.find_pre(pop.name)]))
            if i not in input_indices:
                assert self.find_pre(pop.name), "{} has no inputs".format(pop.name)

    def merge_populations(self, to_keep, to_merge):
        """
        Combines two populations, resulting in a population that has all the
        connections of both.
        :param to_keep: Name of merged population to keep
        :param to_merge: Name of merged population to remove after the merge
        """
        # this could create redundant self-projections, but at the moment the code base
        # doesn't do self-projections
        # TODO: weighted average of layer properties
        # TODO: weighted average of connection properties where connections overlap
        keep_pop = self.find_population(to_keep)
        merge_pop = self.find_population(to_merge)

        projections_to_drop = []
        for projection in self.projections:
            if projection.termination.name == to_merge:
                if projection.origin.name == to_keep:
                    # don't need projection between merged populations
                    projections_to_drop.append(projection)
                elif self.find_projection(projection.origin.name, keep_pop.name):
                    # new projection already exists
                    projections_to_drop.append(projection)
                else:
                    projection.termination = keep_pop
            if projection.origin.name == to_merge:
                if projection.termination.name == to_keep:
                    projections_to_drop.append(projection)
                elif self.find_projection(keep_pop.name, projection.termination.name):
                    projections_to_drop.append(projection)
                else:
                    projection.origin = keep_pop

        for projection in projections_to_drop:
            self.projections.remove(projection)
        self.populations.remove(merge_pop)


def get_example_system():
    result = System()
    result.add_input(250000, 0.02)
    result.add("V1", 10000000, 2000, 0.1)
    result.add("V2", 10000000, 2000, 0.2)
    result.add("V4", 5000000, 2000, 0.4)
    result.connect_areas("INPUT", "V1", 1.0)
    result.connect_areas("V1", "V2", 1.0)
    result.connect_areas("V1", "V4", 0.5)
    result.connect_areas("V2", "V4", 0.5)
    return result


def get_example_small():
    result = System()
    result.add_input(750000, 0.02)
    result.add("V1_4", 53000000, 500, 0.09)
    result.add("V1_23", 53000000, 1000, 0.1)
    result.add("V1_5", 27000000, 3000, 0.11)
    result.add("V2_4", 33000000, 500, 0.19)
    result.add("V2_23", 33000000, 1000, 0.2)
    result.add("V2_5", 17000000, 3000, 0.21)
    result.add("V4_4", 17000000, 500, 0.39)
    result.add("V4_23", 17000000, 1000, 0.4)
    result.add("V4_5", 8000000, 3000, 0.41)
    result.connect_areas("INPUT", "V1_4", 1.0)
    result.connect_layers("V1_4", "V1_23", 800.0)
    result.connect_layers("V1_23", "V1_5", 3000.0)
    result.connect_areas("V1_5", "V2_4", 1.0)
    result.connect_layers("V2_4", "V2_23", 800.0)
    result.connect_layers("V2_23", "V2_5", 3000.0)
    result.connect_areas("V1_5", "V4_4", 0.15)
    result.connect_areas("V2_5", "V4_4", 0.85)
    result.connect_layers("V4_4", "V4_23", 800.0)
    result.connect_layers("V4_23", "V4_5", 3000.0)
    return result


def get_example_medium():
    # This example was written before the code distinguished interarea and interlaminar
    # connections. Interarea connections are used throughout (even between layers) to
    # preserve it as-is.
result = System() result.add_input(750000, 0.02) result.add("LGNparvo", 2000000, 1000, 0.04) result.add("V1_4", 53000000, 500, 0.1) result.add("V1_23", 53000000, 1000, 0.13) result.add("V2_4", 33000000, 500, 0.2) result.add("V2_23", 33000000, 1000, 0.26) result.add("V4_4", 17000000, 500, 0.4) result.add("V4_23", 17000000, 1000, 0.5) result.add("MT_4", 4800000, 500, 1.0) result.add("MT_23", 4800000, 1000, 1.1) result.add("VOT_4", 6000000, 500, 1.4) result.add("VOT_23", 6000000, 1000, 1.5) result.add("PITd_4", 5700000, 500, 3.0) result.add("PITd_23", 5700000, 1000, 4.0) result.add("DP_4", 17000000, 500, 1.7) result.add("DP_23", 17000000, 1000, 1.8) # input result.connect_areas("INPUT", "LGNparvo", 1.0) result.connect_areas("LGNparvo", "V1_4", 1.0) # laminar connections result.connect_areas("V1_4", "V1_23", 1.0) result.connect_areas("V2_4", "V2_23", 1.0) result.connect_areas("V4_4", "V4_23", 1.0) result.connect_areas("MT_4", "MT_23", 1.0) result.connect_areas("VOT_4", "VOT_23", 1.0) result.connect_areas("PITd_4", "PITd_23", 1.0) result.connect_areas("DP_4", "DP_23", 1.0) # feedforward inter-areal connections result.connect_areas("V1_23", "V2_4", 1.0) result.connect_areas("V1_23", "V4_4", 0.0307) result.connect_areas("V1_23", "MT_4", 0.0235) result.connect_areas("V2_23", "V4_4", 0.9693) result.connect_areas("V2_23", "MT_4", 0.2346) result.connect_areas("V2_23", "PITd_4", 0.0026) result.connect_areas("V2_23", "DP_4", 0.2400)
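# Hedged usage sketch for the System API above (methods are taken from this file;
# the demo function name and the 0.6 threshold are arbitrary illustrations):
def demo_prune_example_system():
    system = get_example_system()
    system.check_connected()             # population 0 (INPUT) is exempt by default
    system.prune_FLNe(min_fraction=0.6)  # drops both 0.5-FLNe projections onto V4
    return system.make_graph()           # networkx DiGraph of remaining projections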
        each_df_dtm1, trained_svd = select_top_features_from_SVD(each_df_dtm, '', True)
        ls = ['svd_dim_' + str(x) for x in range(each_df_dtm1.shape[1])]
        each_df_dtm1 = pd.DataFrame(each_df_dtm1, columns=ls, index=orig_each_df_index)
    else:
        each_df_dtm1, _ = select_top_features_from_SVD(each_df_dtm, trained_svd, False)
        ls = ['svd_dim_' + str(x) for x in range(each_df_dtm1.shape[1])]
        each_df_dtm1 = pd.DataFrame(each_df_dtm1, columns=ls, index=orig_each_df_index)
    #### Build a second matrix with the smaller-vocab "small_nlp_vect" vectorizer.
    #### Fit it only on train data; on test data you must only transform, never fit!
    if is_train:
        each_df_dtm2 = small_nlp_vect.fit_transform(each_df[nlp_column])
        each_df_dtm2 = pd.DataFrame(each_df_dtm2.toarray(), index=orig_each_df_index,
                                    columns=small_nlp_vect.get_feature_names())
        #### The top features from each class turned out to be a bad idea, so they are dropped below.
        #print('Added top %d features from Train data' %(each_df_dtm2.shape[1]))
    else:
        each_df_dtm2 = small_nlp_vect.transform(each_df[nlp_column])
        each_df_dtm2 = pd.DataFrame(each_df_dtm2.toarray(), index=orig_each_df_index,
                                    columns=small_nlp_vect.get_feature_names())
        #print('Added top %d features from Test data' %(each_df_dtm2.shape[1]))
    # Now combine them to get a new each_df_best dataframe.
    ### The top features from each class did not improve the model, so they are dropped:
    #each_df_best = each_df_dtm2.join(each_df_dtm1)
    each_df_best = copy.deepcopy(each_df_dtm1)
    print('Combined Data Frame size = %s' %(each_df_best.shape,))
    return each_df_best, big_nlp_vect, small_nlp_vect, trained_svd
###########################################################################
def print_sparse_stats(X_dtm):
    """
    Prints the stats around a Sparse Matrix (typically) generated in NLP problems.
    """
    print('Shape of Sparse Matrix: ', X_dtm.shape)
    print('Amount of Non-Zero occurrences: ', X_dtm.nnz)
    print('Density: %.2f%%' % (100.0 * X_dtm.nnz / (X_dtm.shape[0] * X_dtm.shape[1])))
################################################################################
def tokenize_and_stem(text):
    stemmer = SnowballStemmer("english")
    text = re.sub(r"^\d+\s|\s\d+\s|\s\d+$", " ", text)  # raw string avoids invalid-escape warnings
    # first tokenize by sentence, then by word, so that punctuation is caught as its own token
    tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
    filtered_tokens = []
    # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
    for token in tokens:
        if re.search('[a-zA-Z]', token):
            filtered_tokens.append(token)
    stems = [stemmer.stem(t) for t in filtered_tokens]
    return stems
################################################################################
from sklearn.feature_extraction import text
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import TweetTokenizer, RegexpTokenizer
################################################################################
def select_best_nlp_vectorizer(model, data, col, target, metric, seed, modeltype, min_df):
    """
    ################################################################################
    #### VERY IMPORTANT: CountVectorizer can only deal with one Column at a Time!!
    #### SO YOU MUST NOT SEND AN ENTIRE DATAFRAME AND EXPECT IT TO VECTORIZE. IT BLOWS UP!
    #### Hence we repeatedly send one NLP column after another for Vectorizing and
    #### find the best NLP technique that yields the highest CV metric.
Metrics could be: #### Accuracy, AUC, F1, Precision, and Recall. Having min_df as 10% (i.e. 0.1) is #### a good idea, since it results in much cleaner and much better terms selected. ################################################################################ """ stopWords = return_stop_words() ###################################################### #### This calculates based on the average number of words in an NLP column how many max_features min_df = 2 max_df = 0.95 ###################################################### if len(data) >= 1000000: max_features = 1000 elif len(data) >= 100000: max_features = 500 else: max_features = int(data[col].map(len).mean()*4) print(' A U T O - N L P P R O C E S S I N G O N N L P C O L U M N = %s ' %col) print('#################################################################################') print('Generating new features for NLP column = %s using NLP Transformers' %col) print(' Cleaning text in %s before doing transformation...' %col) start_time = time.time() if modeltype is None or modeltype == '': print('Since modeltype is None, Using TFIDF vectorizer with min_df and max_features') tvec = TfidfVectorizer(ngram_range=(1,3), stop_words=stopWords, max_features=max_features, min_df=min_df,max_df=max_df) data_dtm = data[col] data_dtm = tvec.fit_transform(data_dtm) print('Features: ', data_dtm.shape[1]) print_sparse_stats(data_dtm) #data_dense = convert_sparse_to_dense(data_dtm) return tvec, data_dtm else: data_dtm = data[col] ##### Then do a test train split using data and NLP column called "col" ######### X_train,X_test,y_train,y_test = train_test_split(data[col], data[target],test_size=0.2,random_state=seed) max_features_high = int(250000000/X_train.shape[0]) print(' However max_features limit = %d will limit numerous features from being generated' %max_features_high) best_vec = None all_vecs = {} all_models = {} if data.shape[0] < 10000: count_max_df = 0 print('Trying multiple max_df values in range %s to find best max_df...' %np.linspace(0.95,0.05,5)) for each_max_df in np.linspace(0.95,0.05,5): print(' max_df = %0.4f ' %each_max_df, end='') vect_5000 = CountVectorizer( ngram_range=(1, 3), max_features=max_features_high, max_df=each_max_df, strip_accents='unicode', tokenizer=None,preprocessor=None, min_df=min_df, binary=False, stop_words=None, token_pattern=r'\w{1,}') current_metric, current_model = tokenize_test_by_metric(model, X_train, X_test, y_train, y_test, target, metric, vect_5000, seed, modeltype,verbose=0) if count_max_df == 0: best_metric = copy.deepcopy(current_metric) best_model = copy.deepcopy(current_model) else: if modeltype == 'Regression' or metric in ['logloss','log_loss']: if current_metric <= best_metric: best_metric = copy.deepcopy(current_metric) best_model = copy.deepcopy(current_model) best_max_df = copy.deepcopy(each_max_df) else: best_max_df = each_max_df + 0.20 break else: if current_metric >= best_metric: best_metric = copy.deepcopy(current_metric) best_model = copy.deepcopy(current_model) best_max_df = copy.deepcopy(each_max_df) else: best_max_df = each_max_df + 0.20 break count_max_df += 1 print('Best max_df selected to be %0.2f' %best_max_df) else: best_max_df = 0.5 print('\n#### Optimizing Count Vectorizer with best max_df=%0.2f, 1-3 n-grams and high features...' 
%best_max_df) vect_5000 = CountVectorizer( ngram_range=(1, 3), max_features=max_features_high, max_df=best_max_df, strip_accents='unicode', tokenizer=None, preprocessor=None, min_df=min_df, binary=False, stop_words=None, token_pattern=r'\w{1,}') best_metric, best_model = tokenize_test_by_metric(model, X_train, X_test, y_train, y_test, target, metric, vect_5000, seed, modeltype,verbose=0) #### You have to set the best max df to the recent one plus 0.05 since it breaks when the metric drops vect_5000.max_df = best_max_df all_vecs[vect_5000] = best_metric all_models[vect_5000] = best_model ########################################################################## ##### It's BEST to use small max_features (50) and a low 0.001 min_df with n_gram (2-5). ###### There is no need in that case for stopwords or analyzer since the 2-grams take care of it #### Once you do above, there is no difference between count_vectorizer and tfidf_vectorizer #### Once u do above, increasing max_features from 50 to even 500 doesn't get you a higher score! ########################################################################## print('\n#### Using Count Vectorizer with Latin-1 encoding, limited max_features =%d and a min_df=%s with n_gram (1-5)' %(max_features,min_df)) vect_lemma = CountVectorizer(max_df=best_max_df, max_features=max_features, strip_accents='unicode', ngram_range=(1, 5), token_pattern=r'\w{1,}', min_df=min_df, stop_words=None, encoding='latin-1', binary=False, ) try: all_vecs[vect_lemma], all_models[vect_lemma] = tokenize_test_by_metric(model, X_train, X_test, y_train, y_test, target, metric, vect_lemma, seed, modeltype) except: print('Error: Using CountVectorizer') print('\n# Using TFIDF vectorizer with binary=True, ngram = (1,3) and max_features=%d' %max_features_high) ##### This is based on artificially setting 5GB as being max memory limit for the term-matrix tvec = TfidfVectorizer( max_features=max_features_high, max_df=best_max_df, token_pattern=r'\w{1,}', strip_accents='unicode', sublinear_tf=True, binary=True, stop_words=None, ngram_range=(1, 3), min_df=min_df) all_vecs[tvec], all_models[tvec] = tokenize_test_by_metric(model, X_train, X_test, y_train, y_test, target, metric, tvec, seed, modeltype) max_features_limit = int(tvec.fit_transform(data_dtm).shape[1]) print('\n# Using TFIDF vectorizer with latin-1 encoding, binary=False, ngram (1,3) and limited max_features') tvec2 = TfidfVectorizer( max_features=max_features, max_df=best_max_df, token_pattern=r'\w{1,}', sublinear_tf=True, # tokenizer=simple_tokenizer,preprocessor=simple_preprocessor, tokenizer=None, encoding='latin-1', min_df=min_df, stop_words=None, binary=False, strip_accents='unicode', use_idf=True, ngram_range=(1,3)) all_vecs[tvec2], all_models[tvec2] = tokenize_test_by_metric(model, X_train, X_test, y_train, y_test, target, metric, tvec2, seed, modeltype) #print('\n# Using TFIDF vectorizer with Snowball Stemming, ngram (1,3) and very high max_features') print('\n# Finally comparing them against a Basic Count Vectorizer with all defaults, max_features = %d and lowercase=True' %max_features_high) cvect = CountVectorizer(min_df=2, lowercase=True, max_features=max_features_high, binary=False) all_vecs[cvect], all_models[cvect] = tokenize_test_by_metric(model, X_train, X_test, y_train, y_test, target, metric, cvect, seed, modeltype) ######## Once you have built 4 different transformers it is time to compare them if modeltype.endswith('Classification'): if metric in ['log_loss','logloss']: best_vec = pd.Series(all_vecs).idxmin() 
        else:
            best_vec = pd.Series(all_vecs).idxmax()
    else:
        if modeltype == 'Regression':
            best_vec = pd.Series(all_vecs).idxmin()
        else:
            print('Error: Modeltype not recognized. You must choose Classification or Regression or None')
            return
    print('\nBest NLP technique selected is: \n%s' %best_vec)
    data_dtm = best_vec.transform(data_dtm)
    return best_vec, all_models[best_vec], data_dtm, max_features_limit
############################################################################
from sklearn.metrics import balanced_accuracy_score, accuracy_score, mean_absolute_error, mean_squared_error
def return_scoreval(scoretype, y_true, y_preds, y_proba, modeltype):
    if modeltype.endswith('Classification'):
        if scoretype == 'f1':
            try:
                scoreval = f1_score(y_true, y_preds)
            except:
                scoreval = f1_score(y_true, y_preds, average='micro')
        elif scoretype == 'roc_auc':
            #### ROC AUC can be computed only for Binary classifications ###
            try:
                scoreval = roc_auc_score(y_true, y_proba)
            except:
                scoreval = balanced_accuracy_score(y_true, y_preds)
                print('Multi-class problem. Instead of ROC-AUC, Balanced Accuracy computed')
        elif scoretype == 'precision':
            try:
                scoreval = precision_score(y_true, y_preds)
            except:
                scoreval = precision_score(y_true, y_preds, average='micro')
        elif scoretype == 'recall':
            try:
                scoreval = recall_score(y_true, y_preds)
            except:
                scoreval = recall_score(y_true, y_preds, average='micro')
        elif scoretype in ['balanced_accuracy', 'accuracy', 'balanced-accuracy']:
            try:
                scoreval = balanced_accuracy_score(y_true, y_preds)
            except:
                scoreval = accuracy_score(y_true, y_preds)
        else:
            print('Scoring Type not Recognized - selecting default as F1.')
            scoretype = 'f1'
            try:
                scoreval = f1_score(y_true, y_preds)
            except:
                scoreval = f1_score(y_true, y_preds, average='micro')
    else:
        if scoretype == 'rmse':
            try:
                scoreval = np.sqrt(mean_squared_error(y_true, y_preds))
            except:
                scoreval = 0
        elif scoretype == 'mae':
            try:
                # MAE is reported directly; the square root belongs to RMSE only
                scoreval = mean_absolute_error(y_true, y_preds)
            except:
                scoreval = 0
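###########################################################################
#### Hedged usage sketch for select_best_nlp_vectorizer above. The dataframe,
#### column name and target name here are hypothetical; any sklearn estimator
#### with fit/predict should work as `model`.
def example_nlp_vectorizer_usage(train_df):
    from sklearn.linear_model import LogisticRegression
    best_vec, best_model, train_dtm, max_features_limit = select_best_nlp_vectorizer(
        model=LogisticRegression(max_iter=1000), data=train_df, col='review_text',
        target='label', metric='balanced_accuracy', seed=99,
        modeltype='Multi_Classification', min_df=2)
    return best_vec, best_model, train_dtm, max_features_limit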
        input_size: int,
        output_size: int,
        hidden_size: int,
        n_hidden_layers: int,
        target_sizes: Union[int, List[int]] = [],
        **kwargs,
    ):
        # saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
        self.save_hyperparameters()
        # pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
        super().__init__(**kwargs)

        self.network = FullyConnectedModule(
            input_size=self.hparams.input_size * len(to_list(self.hparams.target_sizes)),
            output_size=self.hparams.output_size * sum(to_list(self.hparams.target_sizes)),
            hidden_size=self.hparams.hidden_size,
            n_hidden_layers=self.hparams.n_hidden_layers,
        )

    def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        # x is a batch generated based on the TimeSeriesDataset
        batch_size = x["encoder_cont"].size(0)
        network_input = x["encoder_cont"].view(batch_size, -1)
        prediction = self.network(network_input)

        # RESHAPE output to batch_size x n_decoder_timesteps x sum_of_target_sizes
        prediction = prediction.unsqueeze(-1).view(
            batch_size, self.hparams.output_size, sum(self.hparams.target_sizes)
        )

        # RESHAPE into list of batch_size x n_decoder_timesteps x target_sizes[i] where i=1..len(target_sizes)
        stops = np.cumsum(self.hparams.target_sizes)
        starts = stops - self.hparams.target_sizes
        prediction = [prediction[..., start:stop] for start, stop in zip(starts, stops)]
        if isinstance(self.hparams.target_sizes, int):  # only one target
            prediction = prediction[0]

        # We need to return a dictionary that at least contains the prediction and the target_scale.
        # The parameter can be directly forwarded from the input.
        return dict(prediction=prediction, target_scale=x["target_scale"])

    @classmethod
    def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs):
        # By default only handle targets of size one here, categorical targets would be of larger size
        new_kwargs = {
            "target_sizes": [1] * len(to_list(dataset.target)),
            "output_size": dataset.max_prediction_length,
            "input_size": dataset.max_encoder_length,
        }
        new_kwargs.update(kwargs)  # use to pass real hyperparameters and override defaults set by dataset

        # example for dataset validation
        assert dataset.max_prediction_length == dataset.min_prediction_length, "Decoder only supports a fixed length"
        assert dataset.min_encoder_length == dataset.max_encoder_length, "Encoder only supports a fixed length"
        assert (
            len(dataset.time_varying_known_categoricals) == 0
            and len(dataset.time_varying_known_reals) == 0
            and len(dataset.time_varying_unknown_categoricals) == 0
            and len(dataset.static_categoricals) == 0
            and len(dataset.static_reals) == 0
            and len(dataset.time_varying_unknown_reals) == len(dataset.target_names)  # expect as many unknown reals as targets
        ), "Only covariate should be in 'time_varying_unknown_reals'"

        return super().from_dataset(dataset, **new_kwargs)


model = FullyConnectedMultiTargetModel.from_dataset(
    multi_target_dataset,
    hidden_size=10,
    n_hidden_layers=2,
    loss=MultiLoss(metrics=[MAE(), SMAPE()], weights=[2.0, 1.0]),
)
model.summarize("full")
model.hparams

# %% [markdown]
# Now, let's pass some data through our model and calculate the loss.
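# %% [markdown]
# A batch can be drawn from a dataloader built on `multi_target_dataset`
# (a minimal sketch; assumes the dataset defined earlier in this tutorial):

# %%
multi_target_dataloader = multi_target_dataset.to_dataloader(batch_size=4)
x, y = next(iter(multi_target_dataloader))  # x: network input dict, y: (targets, weights)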
# %%
out = model(x)
out

# %%
y_hat = model.transform_output(out)  # transform_output re-scales/de-normalizes the predictions into the real target space
model.loss(y_hat, y)

# %% [markdown]
# ## Using covariates

# %%
from pytorch_forecasting.models.base_model import BaseModelWithCovariates

print(BaseModelWithCovariates.__doc__)

# %%
from typing import Dict, List, Tuple

from pytorch_forecasting.models.nn import MultiEmbedding


class FullyConnectedModelWithCovariates(BaseModelWithCovariates):
    def __init__(
        self,
        input_size: int,
        output_size: int,
        hidden_size: int,
        n_hidden_layers: int,
        x_reals: List[str],
        x_categoricals: List[str],
        embedding_sizes: Dict[str, Tuple[int, int]],
        embedding_labels: Dict[str, List[str]],
        static_categoricals: List[str],
        static_reals: List[str],
        time_varying_categoricals_encoder: List[str],
        time_varying_categoricals_decoder: List[str],
        time_varying_reals_encoder: List[str],
        time_varying_reals_decoder: List[str],
        embedding_paddings: List[str],
        categorical_groups: Dict[str, List[str]],
        **kwargs,
    ):
        # saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
        self.save_hyperparameters()
        # pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
        super().__init__(**kwargs)

        # create embedder - can be fed with x["encoder_cat"] or x["decoder_cat"] and will return
        # dictionary of category names mapped to embeddings
        self.input_embeddings = MultiEmbedding(
            embedding_sizes=self.hparams.embedding_sizes,
            categorical_groups=self.hparams.categorical_groups,
            embedding_paddings=self.hparams.embedding_paddings,
            x_categoricals=self.hparams.x_categoricals,
            max_embedding_size=self.hparams.hidden_size,
        )

        # calculate the size of all concatenated embeddings + continuous variables
        n_features = sum(
            embedding_size for classes_size, embedding_size in self.hparams.embedding_sizes.values()
        ) + len(self.reals)

        # create network that will be fed with continuous variables and embeddings
        self.network = FullyConnectedModule(
            input_size=self.hparams.input_size * n_features,
            output_size=self.hparams.output_size,
            hidden_size=self.hparams.hidden_size,
            n_hidden_layers=self.hparams.n_hidden_layers,
        )

    def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        # x is a batch generated based on the TimeSeriesDataset
        batch_size = x["encoder_lengths"].size(0)
        embeddings = self.input_embeddings(x["encoder_cat"])  # returns dictionary with embedding tensors
        network_input = torch.cat(
            [x["encoder_cont"]]
            + [
                emb
                for name, emb in embeddings.items()
                if name in self.encoder_variables or name in self.static_variables
            ],
            dim=-1,
        )
        prediction = self.network(network_input.view(batch_size, -1))

        # We need to return a dictionary that at least contains the prediction and the target_scale.
        # The parameter can be directly forwarded from the input.
        return dict(prediction=prediction, target_scale=x["target_scale"])

    @classmethod
    def from_dataset(cls, dataset: TimeSeriesDataSet, **kwargs):
        new_kwargs = {
            "output_size": dataset.max_prediction_length,
            "input_size": dataset.max_encoder_length,
        }
        new_kwargs.update(kwargs)  # use to pass real hyperparameters and override defaults set by dataset

        # example for dataset validation
        assert dataset.max_prediction_length == dataset.min_prediction_length, "Decoder only supports a fixed length"
        assert dataset.min_encoder_length == dataset.max_encoder_length, "Encoder only supports a fixed length"

        return super().from_dataset(dataset, **new_kwargs)

# %% [markdown]
# Note that the model does not make use of the known covariates in the decoder - this is obviously
# suboptimal but not in the scope of this tutorial. Anyway, let us create a new dataset with
# categorical variables and see how the model can be instantiated from it.

# %%
import numpy as np
import pandas as pd

from pytorch_forecasting import TimeSeriesDataSet

test_data_with_covariates = pd.DataFrame(
    dict(
        # as before
        value=np.random.rand(30),
        group=np.repeat(np.arange(3), 10),
        time_idx=np.tile(np.arange(10), 3),
        # now adding covariates
        categorical_covariate=np.random.choice(["a", "b"], size=30),
        real_covariate=np.random.rand(30),
    )
).astype(
    dict(group=str)
)  # categorical covariates have to be of string type
test_data_with_covariates

# %%
# create the dataset from the pandas dataframe
dataset_with_covariates = TimeSeriesDataSet(
    test_data_with_covariates,
    group_ids=["group"],
    target="value",
    time_idx="time_idx",
    min_encoder_length=5,
    max_encoder_length=5,
    min_prediction_length=2,
    max_prediction_length=2,
    time_varying_unknown_reals=["value"],
    time_varying_known_reals=["real_covariate"],
    time_varying_known_categoricals=["categorical_covariate"],
    static_categoricals=["group"],
)

model = FullyConnectedModelWithCovariates.from_dataset(dataset_with_covariates, hidden_size=10, n_hidden_layers=2)
model.summarize("full")  # print model summary
model.hparams

# %% [markdown]
# To test that the model could be trained, pass a sample batch.
# %%
x, y = next(iter(dataset_with_covariates.to_dataloader(batch_size=4)))  # generate batch
model(x)  # pass batch through model

# %% [markdown]
# ## Implementing an autoregressive / recurrent model

# %%
from torch.nn.utils import rnn

from pytorch_forecasting.models.base_model import AutoRegressiveBaseModel
from pytorch_forecasting.models.nn import LSTM


class LSTMModel(AutoRegressiveBaseModel):
    def __init__(
        self,
        target: str,
        target_lags: Dict[str, Dict[str, int]],
        n_layers: int,
        hidden_size: int,
        dropout: float = 0.1,
        **kwargs,
    ):
        # arguments target and target_lags are required for autoregressive models
        # even though target_lags cannot be used without covariates
        # saves arguments in signature to `.hparams` attribute, mandatory call - do not skip this
        self.save_hyperparameters()
        # pass additional arguments to BaseModel.__init__, mandatory call - do not skip this
        super().__init__(**kwargs)

        # use version of LSTM that can handle zero-length sequences
        self.lstm = LSTM(
            hidden_size=self.hparams.hidden_size,
            input_size=1,
            num_layers=self.hparams.n_layers,
            dropout=self.hparams.dropout,
            batch_first=True,
        )
        self.output_layer = nn.Linear(self.hparams.hidden_size, 1)

    def encode(self, x: Dict[str, torch.Tensor]):
        # we need at least one encoding step because the target needs to be lagged by one time step
        # because we use the custom LSTM, we do not have to require encoder lengths of > 1
        # but can handle lengths of >= 1
        assert x["encoder_lengths"].min() >= 1
        input_vector = x["encoder_cont"].clone()
        # lag target by one
        input_vector[..., self.target_positions] = torch.roll(
            input_vector[..., self.target_positions], shifts=1, dims=1
        )
        input_vector = input_vector[:, 1:]  # first time step cannot be used because of lagging

        # determine effective encoder length
        effective_encoder_lengths = x["encoder_lengths"] - 1
        # run through LSTM network
        _, hidden_state = self.lstm(
            input_vector, lengths=effective_encoder_lengths, enforce_sorted=False  # passing the lengths directly
        )  # the output sequence (first return value) is not needed; keep only the hidden state
        return hidden_state

    def decode(self, x: Dict[str, torch.Tensor], hidden_state):
        # again lag target by one
        input_vector = x["decoder_cont"].clone()
        input_vector[..., self.target_positions] = torch.roll(
            input_vector[..., self.target_positions], shifts=1, dims=1
        )
        # but this time fill in missing target from encoder_cont at the first time step instead of throwing it away
        last_encoder_target = x["encoder_cont"][
            torch.arange(x["encoder_cont"].size(0), device=x["encoder_cont"].device),
            x["encoder_lengths"] - 1,
            self.target_positions.unsqueeze(-1),
        ].T
        input_vector[:, 0, self.target_positions] = last_encoder_target

        if self.training:  # training mode
            lstm_output, _ = self.lstm(input_vector, hidden_state, lengths=x["decoder_lengths"], enforce_sorted=False)

            # transform into right shape
            prediction = self.output_layer(lstm_output)

            # predictions are not yet rescaled
            return dict(prediction=prediction, target_scale=x["target_scale"])

        else:  # prediction mode
            target_pos = self.target_positions

            def decode_one(idx, lagged_targets, hidden_state):
                x = input_vector[:, [idx]]
                # overwrite at target positions
                x[:, 0, target_pos] = lagged_targets[-1]  # take most recent target (i.e. lag=1)
                lstm_output, hidden_state = self.lstm(x, hidden_state)
                # transform into right shape
                prediction = self.output_layer(lstm_output)[:, 0]  # take first timestep
                return prediction, hidden_state

            # make predictions which are fed into next step
            output = self.decode_autoregressive(
                decode_one,
                first_target=input_vector[:, 0, target_pos],
                first_hidden_state=hidden_state,
                target_scale=x["target_scale"],
                n_decoder_steps=input_vector.size(1),
            )

            # predictions are already rescaled
            return dict(prediction=output, output_transformation=None, target_scale=x["target_scale"])

    def forward(self, x: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        hidden_state = self.encode(x)  # encode to hidden state
        output = self.decode(x, hidden_state)  # decode leveraging hidden state
        return output


model = LSTMModel.from_dataset(dataset, n_layers=2, hidden_size=10)
model.summarize("full")
model.hparams

# %%
x, y = next(iter(dataloader))

print(
    "prediction shape in training:", model(x)["prediction"].size()
)  # batch_size x decoder time steps x 1 (1 for one target dimension)
model.eval()  # set model into eval mode to use autoregressive prediction
print("prediction shape in inference:", model(x)["prediction"].size())  # should be the same as in training

# %% [markdown]
# ## Using and defining a custom/non-trivial metric

# %% [markdown]
# To use a different metric, simply pass it
context['jumlah'] = jml elif rating == '2': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.rating=2 AND tp.id_penginapan LIKE 'K%'") res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.rating=2 AND tp.id_penginapan LIKE 'K%') AS temp") jml = dictfetchall(c) context['jumlah'] = jml elif rating == '1': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.rating=1 AND tp.id_penginapan LIKE 'K%'") res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.rating=1 AND tp.id_penginapan LIKE 'K%') AS temp") jml = dictfetchall(c) context['jumlah'] = jml #Filter berdasarkan price priceMin = request.POST.get('priceMin') priceMax = request.POST.get('priceMax') if priceMin != "" and priceMax != "": with connection.cursor() as c: if reservasi == 'Apartemen': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.harga_termurah >= %s AND tp.harga_termurah <= %s AND tp.id_penginapan IN (select id_penginapan from apartemen)", [priceMin, priceMax]) res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.harga_termurah >= %s AND tp.harga_termurah <= %s AND tp.id_penginapan IN (select id_penginapan from apartemen)) AS temp", [priceMin, priceMax]) jml = dictfetchall(c) context['jumlah'] = jml elif reservasi == 'Villa': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.harga_termurah >= %s AND tp.harga_termurah <= %s AND tp.id_penginapan IN (select id_penginapan from villa)", [priceMin, priceMax]) res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.harga_termurah >= %s AND tp.harga_termurah <= %s AND tp.id_penginapan IN (select id_penginapan from villa)) AS temp", [priceMin, priceMax]) jml = dictfetchall(c) context['jumlah'] = jml elif reservasi == 'Kost': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.harga_termurah >= %s AND tp.harga_termurah <= %s AND tp.id_penginapan IN (select id_penginapan from kos)", [priceMin, priceMax]) res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = 
f.Id_penginapan AND tp.harga_termurah >= %s AND tp.harga_termurah <= %s AND tp.id_penginapan IN (select id_penginapan from kos)) AS temp", [priceMin, priceMax]) jml = dictfetchall(c) context['jumlah'] = jml #Filter berdasarkan checkin dan checkout inputCheckin = request.POST.get('checkin') inputCheckout = request.POST.get('checkout') with connection.cursor() as c: if (inputCheckin != '' and inputCheckout != ''): if reservasi == 'Apartemen': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, A.id_penginapan FROM APARTEMEN A, APARTEMEN_ROOM AR, TEMPAT_PENGINAPAN TEM, FOTO F WHERE TEM.id_penginapan = A.id_penginapan AND A.id_penginapan = AR.id_apartemen AND TEM.id_penginapan = F.id_penginapan AND (AR.id_apartemen, AR.kode_room) NOT IN (SELECT PAR.id_apartemen, PAR.id_kode_room FROM PILIHAN_APARTEMEN_ROOM PAR, TRANSAKSI_PENGINAPAN TP WHERE PAR.id_transaksi_penginapan = TP.id_transaksi_penginapan AND TP.tgl_checkin <= %s AND TP.tgl_checkout >= %s)",[inputCheckin, inputCheckout]) res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, A.id_penginapan FROM APARTEMEN A, APARTEMEN_ROOM AR, TEMPAT_PENGINAPAN TEM, FOTO F WHERE TEM.id_penginapan = A.id_penginapan AND A.id_penginapan = AR.id_apartemen AND TEM.id_penginapan = F.id_penginapan AND (AR.id_apartemen, AR.kode_room) NOT IN (SELECT PAR.id_apartemen, PAR.id_kode_room FROM PILIHAN_APARTEMEN_ROOM PAR, TRANSAKSI_PENGINAPAN TP WHERE PAR.id_transaksi_penginapan = TP.id_transaksi_penginapan AND TP.tgl_checkin <= %s AND TP.tgl_checkout >= %s)) AS temp", [inputCheckin, inputCheckout]) jml = dictfetchall(c) context['jumlah'] = jml elif reservasi == 'Villa': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, V.id_penginapan FROM VILLA V, TEMPAT_PENGINAPAN TEM, FOTO F WHERE TEM.id_penginapan = V.id_penginapan AND TEM.id_penginapan = F.id_penginapan AND V.id_penginapan NOT IN (SELECT PV.id_villa FROM PILIHAN_VILLA PV, TRANSAKSI_PENGINAPAN TP WHERE PV.id_transaksi_penginapan = TP.id_transaksi_penginapan AND TP.tgl_checkin <= %s AND TP.tgl_checkout >= %s)",[inputCheckin, inputCheckout]) res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, V.id_penginapan FROM VILLA V, TEMPAT_PENGINAPAN TEM, FOTO F WHERE TEM.id_penginapan = V.id_penginapan AND TEM.id_penginapan = F.id_penginapan AND V.id_penginapan NOT IN (SELECT PV.id_villa FROM PILIHAN_VILLA PV, TRANSAKSI_PENGINAPAN TP WHERE PV.id_transaksi_penginapan = TP.id_transaksi_penginapan AND TP.tgl_checkin <= %s AND TP.tgl_checkout >= %s)) AS temp", [inputCheckin, inputCheckout]) jml = dictfetchall(c) context['jumlah'] = jml elif reservasi == 'Kost': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, K.id_penginapan FROM KOS K, KOS_ROOM KR, TEMPAT_PENGINAPAN TEM, FOTO F WHERE TEM.id_penginapan = K.id_penginapan AND K.id_penginapan = KR.id_kos AND TEM.id_penginapan = F.id_penginapan AND (KR.id_kos, KR.kode_room) NOT IN (SELECT PKR.id_kos, PKR.id_kode_room FROM PILIHAN_KOS_ROOM PKR, TRANSAKSI_PENGINAPAN TP WHERE PKR.id_transaksi_penginapan = TP.id_transaksi_penginapan AND TP.tgl_checkin <= %s AND TP.tgl_checkout >= %s)",[inputCheckin, inputCheckout]) res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, 
K.id_penginapan FROM KOS K, KOS_ROOM KR, TEMPAT_PENGINAPAN TEM, FOTO F WHERE TEM.id_penginapan = K.id_penginapan AND K.id_penginapan = KR.id_kos AND TEM.id_penginapan = F.id_penginapan AND (KR.id_kos, KR.kode_room) NOT IN (SELECT PKR.id_kos, PKR.id_kode_room FROM PILIHAN_KOS_ROOM PKR, TRANSAKSI_PENGINAPAN TP WHERE PKR.id_transaksi_penginapan = TP.id_transaksi_penginapan AND TP.tgl_checkin <= %s AND TP.tgl_checkout >= %s)) AS temp", [inputCheckin, inputCheckout]) jml = dictfetchall(c) context['jumlah'] = jml context['ci'] = checkin context['co'] = checkout context['jum'] = jumlah set_session(request, context) return render(request,'hasil_pencarian.html', context) elif request.method == "GET": print("masuk GET") print(lokasi) #Berdasarkan lokasi with connection.cursor() as c: #Apartemen if reservasi == 'Apartemen': if lokasi == 'DKI Jakarta': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.provinsi='DKI Jakarta' AND tp.id_penginapan LIKE 'A%'") res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.provinsi='DKI Jakarta' AND tp.id_penginapan LIKE 'A%') AS temp") jml = dictfetchall(c) context['jumlah'] = jml elif lokasi == 'Jawa Barat': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.provinsi='Jawa Barat' AND tp.id_penginapan LIKE 'A%'") res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.provinsi='Jawa Barat' AND tp.id_penginapan LIKE 'A%') AS temp") jml = dictfetchall(c) context['jumlah'] = jml elif lokasi == 'Jawa Tengah': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.provinsi='Jawa Tengah' AND tp.id_penginapan LIKE 'A%'") res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.provinsi='Jawa Tengah' AND tp.id_penginapan LIKE 'A%') AS temp") jml = dictfetchall(c) context['jumlah'] = jml elif lokasi == 'Jawa Timur': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.provinsi='Jawa Timur'AND tp.id_penginapan LIKE 'A%'") res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.provinsi='Jawa Timur' AND tp.id_penginapan LIKE 'A%') AS temp") jml = dictfetchall(c) context['jumlah'] = jml elif lokasi == 'Banten': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = 
f.Id_penginapan AND tp.provinsi='Banten' AND tp.id_penginapan LIKE 'A%'") res = dictfetchall(c) context['res'] = res c.execute("SELECT COUNT(*) FROM(SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND tp.provinsi='Banten' AND tp.id_penginapan LIKE 'A%') AS temp") jml = dictfetchall(c) context['jumlah'] = jml elif lokasi == 'Yogyakarta': c.execute("SELECT distinct on (nama) nama, link, alamat_lengkap, rating, harga_termurah, tp.id_penginapan FROM tempat_penginapan tp, foto f WHERE tp.Id_penginapan = f.Id_penginapan AND
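# Hedged refactor sketch for the view above: every rating/province/price branch
# re-runs the same two statements with different literals, so one parameterized
# helper could replace them. Table and column names are taken from the queries
# above; the helper and constant names are hypothetical.
BASE_PENGINAPAN_SQL = (
    "SELECT DISTINCT ON (nama) nama, link, alamat_lengkap, rating, harga_termurah, "
    "tp.id_penginapan FROM tempat_penginapan tp, foto f "
    "WHERE tp.id_penginapan = f.id_penginapan AND tp.provinsi = %s "
    "AND tp.id_penginapan LIKE %s"
)

def fetch_penginapan_by_province(cursor, provinsi, id_prefix):
    # id_prefix is 'A' for apartemen, 'V' for villa, 'K' for kos, as in the queries above
    params = [provinsi, id_prefix + '%']
    cursor.execute(BASE_PENGINAPAN_SQL, params)
    res = dictfetchall(cursor)
    cursor.execute("SELECT COUNT(*) FROM (" + BASE_PENGINAPAN_SQL + ") AS temp", params)
    jumlah = dictfetchall(cursor)
    return res, jumlah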
#
# Copyright (c) 2020 <NAME> <<EMAIL>>
#
# This source code is licensed under an MIT license found in the LICENSE file in the root directory of this project.
#

import os.path as osp
from itertools import product

import matplotlib
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import scipy
from scipy.stats import sem
import numpy as np
import rlutils as rl
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle, Polygon

from .cycle_mdp_dataset import load_cycle_mdp_dataset

get_avg_ep_len = lambda exp: np.mean(np.mean(exp.episode_length, axis=-1), axis=-1)


def transfer_episode_length(ep_len, color='C0', label=None):
    num_eps = np.shape(ep_len)[2]
    for task_idx in range(np.shape(ep_len)[1]):
        ep_idx = np.arange(num_eps) + task_idx * num_eps
        rl.plot.mean_with_sem(ep_idx, ep_len[:, task_idx, :], axis=0, color=color, label=label)
        label = None


def plot_lr_comparison_qlearning(experiment_list):  # pragma: no cover
    lr_avg_ep_len = [(e.learning_rate, get_avg_ep_len(e)) for e in experiment_list]
    lr_avg_ep_len = sorted(lr_avg_ep_len, key=lambda e: e[0])
    plt.violinplot([e[1] for e in lr_avg_ep_len])
    plt.xticks(range(1, len(lr_avg_ep_len) + 1), [e[0] for e in lr_avg_ep_len])
    plt.xlabel('Learning Rate')
    plt.ylabel('Avg. episode length')


def plot_lr_comparison_sflearning(experiment_list):  # pragma: no cover
    lr_ep_len = [(e.learning_rate_sf, e.learning_rate_reward, get_avg_ep_len(e)) for e in experiment_list]
    lr_ep_len = sorted(lr_ep_len, key=lambda e: (e[1], e[0]))
    plt.figure(figsize=(15, 6))
    plt.violinplot([e[2] for e in lr_ep_len])
    xticks = range(1, len(lr_ep_len) + 1)
    xticks_lab = [r'lr\_sf={}, lr\_r={}'.format(e[0], e[1]) for e in lr_ep_len]
    plt.xticks(xticks, xticks_lab)
    plt.xlabel('Learning Rate')
    plt.ylabel('Avg. episode length')


def plot_lr_comparison_dirichlet_process_model(experiment_list):  # pragma: no cover
    lr_a_b_avg_ep_len = [(e.learning_rate, e.alpha, e.beta, get_avg_ep_len(e)) for e in experiment_list]
    lr_a_b_avg_ep_len = sorted(lr_a_b_avg_ep_len, key=lambda e: (e[2], e[1], e[0]))
    plt.figure(figsize=(15, 6))
    for i, lr in enumerate([0.1, 0.5, 0.9]):
        # keep experiments with this iteration's learning rate
        ep_len = [e[3] for e in filter(lambda e: e[0] == lr, lr_a_b_avg_ep_len)]
        plt.violinplot(ep_len, positions=np.arange(len(ep_len)) * 3 + i)
    xticks_labels = [r'lr={}, $\alpha$={}, $\beta$={}'.format(e[0], e[1], e[2]) for e in lr_a_b_avg_ep_len]
    _ = plt.xticks(range(len(xticks_labels)), xticks_labels, rotation=90)
    _ = plt.ylabel('Avg. episode length')


def plot_lr_sf_lr_rew_comparison_dirichlet_process_model(experiment_list):  # pragma: no cover
    param_ep_len = []
    for e in experiment_list:
        param_ep_len.append((e.learning_rate_sf, e.learning_rate_reward, e.alpha, e.beta, get_avg_ep_len(e)))
    param_ep_len = sorted(param_ep_len, key=lambda e: (e[3], e[2], e[0], e[1]))
    lr_sf_list = [p[0] for p in param_ep_len]
    lr_rew_list = [p[1] for p in param_ep_len]
    plt.figure(figsize=(15, 6))
    for i, (lr_sf, lr_rew) in enumerate(product(lr_sf_list, lr_rew_list)):
        param_filtered = filter(lambda e: e[0] == lr_sf and e[1] == lr_rew, param_ep_len)
        ep_len = [e[4] for e in param_filtered]
        plt.violinplot(ep_len, positions=np.arange(len(ep_len)) * len(lr_sf_list) * len(lr_rew_list) + i)
    xticks_labels = []
    for e in param_ep_len:
        xticks_labels.append(
            r'lr_sf={}, lr_rew={}, $\alpha$={}, $\beta$={}'.format(e[0], e[1], e[2], e[3])
        )
    _ = plt.xticks(range(len(xticks_labels)), xticks_labels, rotation=90)
    _ = plt.ylabel('Avg. episode length')
def plot_alpha_vs_belief_space_size(experiment_set, hparam_alpha_beta, color_list=None, legend=False,
                                    figsize=(2.5, 2)):  # pragma: no cover
    plt.figure(figsize=figsize)
    alpha_values = experiment_set.get_hparam_values('alpha')
    beta_values = experiment_set.get_hparam_values('beta')
    if color_list is None:
        color_list = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
    posterior_size = lambda e: [np.shape(cnt)[-1] for cnt in e.results['count']]
    exp_getter = lambda a, b: experiment_set.get_experiment_list_by_hparam(hparam_alpha_beta(a, b))[0]
    for beta, c in zip(beta_values, color_list):
        exp_alpha = [exp_getter(alpha, beta) for alpha in alpha_values]
        belief_size_list = np.stack([posterior_size(exp) for exp in exp_alpha])
        if beta == np.inf:
            beta = r'$\infty$'
        elif beta == int(beta):
            beta = int(beta)
        rl.plot.mean_with_sem(alpha_values, belief_size_list, axis=1, color=c, label=r'$\beta$={}'.format(beta))
    plt.gca().set_xscale('log')
    plt.xlabel(r'$\alpha$ Value')
    plt.ylabel('Avg. belief space size')
    plt.ylim([0, 9.6])
    plt.yticks([1, 2, 3, 4, 5, 6], ['{:4d}'.format(i) for i in [1, 2, 3, 4, 5, 6]])
    plt.xticks(alpha_values)
    if legend:
        plt.legend(frameon=False)


def plot_cycle_mdp_belief_space_size(experiment_set, hparam_alpha_beta):
    plot_alpha_vs_belief_space_size(experiment_set, hparam_alpha_beta, legend=True, figsize=(1.8, 2))
    yticks = list(range(0, 21, 2))
    plt.yticks(yticks, ['{:2d}'.format(i) for i in yticks])
    plt.xticks([1e-3, 1e0, 1e3])
    plt.ylim([0, 21])


def plot_alpha_vs_episode_length(experiment_set, hparam_alpha_beta, color_list=None, legend=False):  # pragma: no cover
    plt.figure(figsize=(2.5, 2))
    alpha_values = experiment_set.get_hparam_values('alpha')
    beta_values = experiment_set.get_hparam_values('beta')
    if color_list is None:
        color_list = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
    avg_ep_len = lambda exp: np.mean(np.mean(exp.results['episode_length'], axis=-1), axis=-1)
    exp_getter = lambda a, b: experiment_set.get_experiment_list_by_hparam(hparam_alpha_beta(a, b))[0]
    for beta, c in zip(beta_values, color_list):
        exp_alpha = [exp_getter(alpha, beta) for alpha in alpha_values]
        ep_len_list = np.stack([avg_ep_len(exp) for exp in exp_alpha])
        if beta == np.inf:
            beta = r'$\infty$'
        elif beta == int(beta):
            beta = int(beta)
        rl.plot.mean_with_sem(alpha_values, ep_len_list, axis=1, color=c, label=r'$\beta$={}'.format(beta))
    plt.gca().set_xscale('log')
    plt.xlabel(r'$\alpha$ Value')
    plt.ylabel('Avg. episode length')
    plt.xticks(alpha_values)
    plt.ylim([100, 400])
    if legend:
        plt.legend(frameon=False)


def plot_alpha_vs_total_reward(experiment_set, hparam_alpha_beta, color_list=None, legend=False,
                               figsize=(1.8, 2)):  # pragma: no cover
    plt.figure(figsize=figsize)
    alpha_values = experiment_set.get_hparam_values('alpha')
    beta_values = experiment_set.get_hparam_values('beta')
    if color_list is None:
        color_list = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
    avg_ep_len = lambda exp: np.mean(exp.results['total_reward'], axis=-1)
    exp_getter = lambda a, b: experiment_set.get_experiment_list_by_hparam(hparam_alpha_beta(a, b))[0]
    for beta, c in zip(beta_values, color_list):
        exp_alpha = [exp_getter(alpha, beta) for alpha in alpha_values]
        ep_len_list = np.stack([avg_ep_len(exp) for exp in exp_alpha])
        if beta == np.inf:
            beta = r'$\infty$'
        elif beta == int(beta):
            beta = int(beta)
        rl.plot.mean_with_sem(alpha_values, ep_len_list, axis=1, color=c, label=r'$\beta$={}'.format(beta))
    plt.gca().set_xscale('log')
    plt.xlabel(r'$\alpha$ Value')
    plt.ylabel('Avg.
Total Reward') plt.xticks([alpha_values[0], 1., alpha_values[-1]]) plt.ylim([1.8, 3.4]) plt.yticks([2, 3]) if legend: plt.legend(frameon=False) def plot_avg_highest_count(experiment_set, hparam_alpha_beta, figsize=(3, 2)): plt.figure(figsize=figsize) partition_idx_seq_list = load_cycle_mdp_dataset()[1] gt_abs_count = np.sum(partition_idx_seq_list == 0, axis=-1) gt_abs_count_m = np.mean(gt_abs_count, axis=-1) gt_abs_count_e = sem(gt_abs_count, axis=-1) plt.fill_between( [-1, len(experiment_set.experiment_list) + 1], y1=gt_abs_count_m+gt_abs_count_e, y2=gt_abs_count_m-gt_abs_count_e, color='k', alpha=0.2 ) plt.bar( [0], [gt_abs_count_m], yerr=[gt_abs_count_e], color='w', edgecolor='k', label='Ground Truth\n(G.T.)' ) alpha_values = experiment_set.get_hparam_values('alpha') beta_values = experiment_set.get_hparam_values('beta') get_exp = lambda a, b: experiment_set.get_experiment_list_by_hparam(hparam_alpha_beta(a, b))[0] get_counts = lambda e: [np.max(c[-1]) for c in e.results['count']] for i, (beta, color) in enumerate(zip(beta_values, ['C0', 'C1', 'C2'])): counts = np.stack([get_counts(get_exp(alpha, beta)) for alpha in alpha_values]) counts_m = np.mean(counts, axis=-1) counts_e = sem(counts, axis=-1) xvalues = np.arange(len(alpha_values)) xvalues += i * len(alpha_values) + 1 if beta == np.inf: beta_str = r'$\infty$' elif beta == int(beta): beta_str = '{}'.format(int(beta)) else: beta_str = '{}'.format(beta) plt.bar(xvalues, counts_m, yerr=counts_e, color=color, label=r'$\beta$={}'.format(beta_str)) alpha_val_str = ['{}'.format(a) for a in alpha_values] alpha_val_str = ['G. T.'] + alpha_val_str + alpha_val_str + alpha_val_str plt.xticks(range(len(alpha_val_str)), alpha_val_str, rotation='vertical') plt.xlabel(r'$\alpha$ Value') plt.xlim([-.8, len(experiment_set.experiment_list) + .8]) plt.ylabel('Average Highest Count') plt.yticks([0, 5, 10, 15, 20]) plt.ylim([0, 27]) plt.legend(loc=9, ncol=4, frameon=False, handlelength=.8, handletextpad=.4, columnspacing=1.0) def plot_convergence_rate_comparison(ep_len_list, color_list, label_list, figsize=(10, 5)): # pragma: no cover f, (ax1, ax2) = plt.subplots( nrows=2, ncols=1, figsize=figsize, sharex=True, gridspec_kw={'height_ratios': [1, 5]} ) plt.sca(ax1) for ep_len, c, l in zip(ep_len_list, color_list, label_list): transfer_episode_length(ep_len, color=c, label=l) plt.yticks([1500, 2000], ['1500', '2000\n(Timeout)']) plt.ylim([1400, 2100]) ax1.spines['bottom'].set_visible(False) ax1.set_xticks([]) ax1.xaxis.tick_top() plt.sca(ax2) for ep_len, c, l in zip(ep_len_list, color_list, label_list): transfer_episode_length(ep_len, color=c, label=l) ax2.set_ylim([0, 310]) ax2.spines['top'].set_visible(False) ax2.set_xticks([1, 200, 400, 600, 800, 1000]) plt.xlabel('Episode') plt.ylabel('Avg. 
episode length') plt.legend() def process_and_pad_posterior(ar, l): if len(ar) > 1: ar = list(ar[-1:]) + list(ar[:-1]) return np.array(list(ar) + [np.nan] * (l - len(ar)), dtype=np.float32)[::-1] # prefix_nan_pad = lambda ar, l: np.array([np.nan] * (l - len(ar)) + list(ar[:-1][::-1]) + list(ar[-1])) def plot_belief_posterior(experiment, repeat=0, belief_size=None): # pragma: no cover import matplotlib import matplotlib.colors as colors if belief_size is None: belief_size = np.max([len(p) for p in experiment.posterior_episode_log]) posterior_padded = np.stack([process_and_pad_posterior(p, belief_size) for p in experiment.posterior_episode_log]) posterior_padded = posterior_padded[repeat * 1000:(repeat + 1) * 1000].transpose() posterior_padded = posterior_padded[::-1] # cmap = matplotlib.cm.winter cmap = colors.LinearSegmentedColormap.from_list( 'custom_cmap', matplotlib.cm.winter(np.linspace(.5, 1, 5)) ) cmap.set_bad('white', 1.) numrows, numcols = np.shape(posterior_padded) plt.matshow( posterior_padded, cmap=cmap, aspect='auto', origin='upper', extent=(0.5, numcols + 0.5, numrows + 0.5, +0.5), fignum=1 ) cbar = plt.colorbar(aspect=8, pad=0.01) cbar.set_label('Probability') plt.clim([0, 1]) _ = plt.yticks(range(1, belief_size + 1), ['Identity'] + ['Rep. {}'.format(i) for i in range(1, belief_size)]) _ = plt.gca().xaxis.set_ticks_position("bottom") def plot_maze_a_background(): # pragma: no cover plt.gca().add_collection(PatchCollection([Rectangle([-.5, -.5], 1., 1.)], facecolor='C0')) plt.gca().add_collection(PatchCollection([Rectangle([8.5, -.5], 1., 1.)], facecolor='C2')) for i in range(9): plt.plot([i + .5, i + .5], [-5., 9.5], ':k', alpha=0.5) plt.plot([-5., 9.5], [i + .5, i + .5], ':k', alpha=0.5) plt.plot([1.5, 1.5], [-.5, 7.5], 'k', linewidth=3) plt.plot([3.5, 3.5], [1.5, 9.5], 'k', linewidth=3) plt.plot([5.5, 5.5], [-.5, 7.5], 'k', linewidth=3) plt.plot([7.5, 7.5], [1.5, 9.5], 'k', linewidth=3) def plot_maze_b_background(): # pragma: no cover plt.gca().add_collection(PatchCollection([Rectangle([-.5, 8.5], 1., 1.)], facecolor='C0')) plt.gca().add_collection(PatchCollection([Rectangle([8.5, 8.5], 1., 1.)], facecolor='C2')) for i in range(9): plt.plot([i + .5, i + .5], [-5., 9.5], ':k', alpha=0.5) plt.plot([-5., 9.5], [i + .5, i + .5], ':k', alpha=0.5) plt.plot([1.5, 1.5], [1.5, 9.5], 'k', linewidth=3) plt.plot([3.5, 3.5], [-.5, 7.5], 'k', linewidth=3) plt.plot([5.5, 5.5], [1.5, 9.5], 'k', linewidth=3) plt.plot([7.5, 7.5], [-.5, 7.5], 'k', linewidth=3) def plot_maze_a_decoration(): # pragma: no cover plt.text(9.65, 0.05, 'Goal\nState', fontsize=24, horizontalalignment='left', verticalalignment='center') plt.text(-.65, -.05, 'Start\nState', fontsize=23, horizontalalignment='right', verticalalignment='center') plt.ylim([-.5, 9.5]) plt.xlim([-.5, 9.5]) _ = plt.xticks([]) _ = plt.yticks([]) plt.gca().invert_yaxis() plt.gca().set_aspect('equal') def plot_maze_b_decoration(): # pragma: no cover # plt.text(9.05, 9.05, '+1', fontsize=9, horizontalalignment='center', verticalalignment='center') plt.text(9.65, 9.05, 'Goal\nState', fontsize=24, horizontalalignment='left', verticalalignment='center') plt.text(-.65, 9.05, 'Start\nState', fontsize=24, horizontalalignment='right', verticalalignment='center') plt.ylim([-.5, 9.5]) plt.xlim([-.5, 9.5]) _ = plt.xticks([]) _ = plt.yticks([]) plt.gca().invert_yaxis() plt.gca().set_aspect('equal') def plot_double_state_space_abstraction(): # pragma: no cover plt.gca().add_collection( PatchCollection( [Polygon([[7, -.5], [9.5, -.5], [9.5, 9.5], [3, 9.5]])], 
facecolor='k', alpha=0.2 ) ) def plot_half_state_space_abstraction(): # pragma: no cover plt.gca().add_collection(
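# Hedged usage sketch for the maze helpers above (assumes only the functions
# defined in this module and matplotlib; the function name and filename are
# illustrative):
def plot_maze_a_figure(path='maze_a_layout.pdf'):  # pragma: no cover
    plt.figure(figsize=(4, 4))
    plot_maze_a_background()   # grid, walls, start/goal patches
    plot_maze_a_decoration()   # labels, limits, inverted y-axis, equal aspect
    plt.savefig(path, bbox_inches='tight')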
import pickle  # needed by extract_auc_data / plot_auc_improvements below

import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from deep_rl import *

FOLDER = '/home/hod/Desktop/DeepRL-Bi-Res-DDPG/img'


def plot(**kwargs):
    kwargs.setdefault('average', False)
    # kwargs.setdefault('color', 0)
    kwargs.setdefault('top_k', 0)
    # kwargs.setdefault('top_k_perf', lambda x: np.mean(x[-20:]))
    kwargs.setdefault('max_timesteps', 1e8)
    kwargs.setdefault('episode_window', 100)
    kwargs.setdefault('x_interval', 1000)
    kwargs.setdefault('down_sample', False)
    plotter = Plotter()
    names = plotter.load_log_dirs(**kwargs)
    data = plotter.load_results(names, episode_window=kwargs['episode_window'],
                                max_timesteps=kwargs['max_timesteps'])
    print('')

    if kwargs['average']:
        color = kwargs['color']
        x, y = plotter.average(data, kwargs['x_interval'], kwargs['max_timesteps'], top_k=kwargs['top_k'])
        print(y.shape)
        if kwargs['down_sample']:
            indices = np.linspace(0, len(x) - 1, 500).astype(int)  # builtin int (np.int was removed in NumPy >= 1.24)
            x = x[indices]
            y = y[:, indices]
        name = names[0].split('/')[-1]
        plotter.plot_standard_error(y, x, label=name, color=Plotter.COLORS[color])
        # sns.tsplot(y, x, condition=name, ci='sd')
        plt.title(names[0])
    else:
        for i, name in enumerate(names):
            x, y = data[i]
            if 'color' not in kwargs.keys():
                color = Plotter.COLORS[i]
            else:
                color = Plotter.COLORS[kwargs['color']]
            plt.plot(x, y, color=color, label=name if i == 0 else '')
    plt.legend()
    if 'y_lim' in kwargs.keys():
        plt.ylim(kwargs['y_lim'])
    plt.xlabel('timesteps')
    plt.ylabel('episode return')


def ddpg_plot(**kwargs):
    kwargs.setdefault('average', True)
    kwargs.setdefault('color', 0)
    kwargs.setdefault('top_k', 0)
    kwargs.setdefault('max_timesteps', 1e8)
    kwargs.setdefault('max_x_len', None)
    kwargs.setdefault('type', 'mean')
    kwargs.setdefault('data', False)
    kwargs.setdefault('window', 0)
    plotter = Plotter()
    names = plotter.load_log_dirs(**kwargs)
    data = plotter.load_results(names, episode_window=kwargs['window'], max_timesteps=kwargs['max_timesteps'])
    if len(data) == 0:
        print('File not found')
        return
    data = [y[: len(y) // kwargs['rep'] * kwargs['rep']] for x, y in data]
    min_y = np.min([len(y) for y in data])
    data = [y[:min_y] for y in data]
    new_data = []
    for y in data:
        y = np.reshape(np.asarray(y), (-1, kwargs['rep'])).mean(-1)
        x = np.arange(y.shape[0]) * kwargs['x_interval']
        max_x_len = kwargs['max_x_len']
        if max_x_len is not None:
            x = x[:max_x_len]
            y = y[:max_x_len]
        new_data.append([x, y])
    data = new_data

    if kwargs['top_k']:
        scores = []
        for x, y in data:
            scores.append(np.sum(y))
        best = list(reversed(np.argsort(scores)))
        best = best[:kwargs['top_k']]
        data = [data[i] for i in best]

    if kwargs['data']:
        return np.asarray([entry[1] for entry in data])

    print('')

    color = kwargs['color']
    if kwargs['average']:
        x = data[0][0]
        y = [entry[1] for entry in data]
        y = np.stack(y)
        if kwargs['type'] == 'mean':
            plotter.plot_standard_error(y, x, label=kwargs['label'], color=Plotter.COLORS[color])
        elif kwargs['type'] == 'median':
            plotter.plot_median_std(y, x, label=kwargs['label'], color=Plotter.COLORS[color])
        else:
            raise NotImplementedError
    else:
        for i, (x, y) in enumerate(data):
            plt.plot(x, y, color=Plotter.COLORS[color], label=names[i] if i == 0 else '')


def plot_ddpg_variants(type='mean'):
    kwargs = {
        'x_interval': int(1e4),
        'rep': 20,
        'average': True,
        'max_x_len': 101,
        'top_k': 0,
        'type': type,
        'window': 0,
    }
    game = 'dm-walker-stand'
    cfgs = ['bi', 'oo', 'tt', 'to', 'ot']
    titles = ['Bi-Res-DDPG', 'Res-DDPG', 'TT-Res-DDPG', 'TO-Res-DDPG', 'OT-Res-DDPG']
    patterns = [
        'net_cfg_%s-residual_0-run',
        'net_cfg_%s-residual_0\.05-run',
        'net_cfg_%s-residual_0\.1-run',
'net_cfg_%s-residual_0\.2-run', 'net_cfg_%s-residual_0\.4-run', 'net_cfg_%s-residual_0\.8-run', 'net_cfg_%s-residual_1\.0-run', ] labels = [ r'$\eta=0$', r'$\eta=0.05$', r'$\eta=0.1$', r'$\eta=0.2$', r'$\eta=0.4$', r'$\eta=0.8$', r'$\eta=1$', ] l = len(cfgs) plt.figure(figsize=(l * 6, 5)) plt.rc('text', usetex=True) plt.tight_layout() for i, cfg in enumerate(cfgs): plt.subplot(1, l, i + 1) for j, p in enumerate(patterns): ddpg_plot(pattern='.*residual-params/%s-%s.*' % (game, p % (cfg)), color=j, label=labels[j], **kwargs) plt.title(titles[i], fontsize=30, fontweight="bold") plt.ylim([0, 1000]) plt.xticks([0, int(1e6)], ['0', r'$10^6$']) plt.tick_params(axis='x', labelsize=30) plt.tick_params(axis='y', labelsize=30) plt.xlabel('Steps', fontsize=30) if i == 2: plt.legend(fontsize=17, frameon=False) if not i: plt.ylabel('Episode Return', fontsize=30) else: plt.tick_params(labelleft=False) plt.savefig('%s/ddpg-variants-%s.pdf' % (FOLDER, type), bbox_inches='tight') plt.show() def extract_auc_data(): kwargs = { 'x_interval': int(1e4), 'rep': 20, 'average': True, 'max_x_len': 101, 'top_k': 0, } games = [ 'dm-acrobot-swingup', 'dm-acrobot-swingup_sparse', 'dm-ball_in_cup-catch', 'dm-cartpole-swingup', 'dm-cartpole-swingup_sparse', 'dm-cartpole-balance', 'dm-cartpole-balance_sparse', 'dm-cheetah-run', 'dm-finger-turn_hard', 'dm-finger-spin', 'dm-finger-turn_easy', 'dm-fish-upright', 'dm-fish-swim', 'dm-hopper-stand', 'dm-hopper-hop', 'dm-humanoid-stand', 'dm-humanoid-walk', 'dm-humanoid-run', 'dm-manipulator-bring_ball', 'dm-pendulum-swingup', 'dm-point_mass-easy', 'dm-reacher-easy', 'dm-reacher-hard', 'dm-swimmer-swimmer15', 'dm-swimmer-swimmer6', 'dm-walker-stand', 'dm-walker-walk', 'dm-walker-run', ] patterns = [ 'remark_residual-residual_0\.05-target_net_residual_True-run', 'remark_residual-residual_0-target_net_residual_True-run', ] names = [] improvements = [] for game in games: AUC = [] for p in patterns: data = ddpg_plot(pattern='.*residual-ddpg/%s-%s.*' % (game, p), data=True, **kwargs) AUC.append(data.mean(0).sum()) improvements.append((AUC[0] - AUC[1]) / AUC[1]) names.append(game[3:]) print(names[-1], improvements[-1]) with open('./data/residual/auc.bin', 'wb') as f: pickle.dump([names, improvements], f) def plot_auc_improvements(): with open('./data/residual/auc.bin', 'rb') as f: games, improvements = pickle.load(f) indices = list(reversed(np.argsort(improvements))) games = [games[i] for i in indices] improvements = [improvements[i] for i in indices] print(np.median(improvements), np.mean(improvements)) for g, i in zip(games, improvements): print(g, i) x = np.arange(len(improvements)) plt.tight_layout() plt.bar(x, improvements) plt.xticks(x, games, rotation=-90) plt.gca().invert_yaxis() yticks = np.arange(-1, 4, 1) plt.yticks(yticks, ['-100%', '0', '100%', '200%', '300%'], rotation=-90, verticalalignment='center') # plt.ylabel('AUC Improvement', rotation=-90) plt.savefig('%s/ddpg-auc.pdf' % (FOLDER), bbox_inches='tight') plt.show() def plot_oracle(type='mean'): kwargs = { 'x_interval': int(1e4), 'rep': 20, 'average': True, 'max_x_len': 101, 'top_k': 0, 'type': type, } games = [ 'HalfCheetah-v2', 'Walker2d-v2', 'Hopper-v2', 'Swimmer-v2', 'Humanoid-v2', ] patterns = [ 'action_noise_0\.1-live_action_False-plan_steps_1-residual_0-target_net_residual_True-run', 'action_noise_0\.1-live_action_False-plan_steps_1-residual_0\.2-target_net_residual_False-run', 'MVE_3-plan_False-run', ] labels = [ 'Dyna-DDPG', 'Res-Dyna-DDPG', 'MVE-DDPG', ] l = len(games) plt.figure(figsize=(l * 6, 5)) 
plt.rc('text', usetex=True) plt.tight_layout() for j, game in enumerate(games): plt.subplot(1, l, j + 1) ddpg_plot(pattern='.*mujoco-baseline/%s-%s.*' % (game, 'remark_ddpg-run'), color=0, label='DDPG', **kwargs) for i, p in enumerate(patterns): ddpg_plot(pattern='.*oracle-ddpg/%s-%s.*' % (game, p), color=i + 1, label=labels[i], **kwargs) plt.title(game, fontsize=30, fontweight="bold") plt.xticks([0, int(1e6)], ['0', r'$10^6$']) plt.tick_params(axis='x', labelsize=30) plt.tick_params(axis='y', labelsize=25) plt.xlabel('Steps', fontsize=30) if not j: plt.ylabel('Episode Return', fontsize=30) plt.legend(fontsize=17, frameon=False) plt.savefig('%s/ddpg-oracle-%s.png' % (FOLDER, type), bbox_inches='tight') plt.show() def plot_dyna(type='mean'): kwargs = { 'x_interval': int(1e4), 'rep': 20, 'average': True, 'max_x_len': 101, 'top_k': 0, 'type': type, } games = [ 'HalfCheetah-v2', 'Walker2d-v2', 'Hopper-v2', 'Swimmer-v2', 'Humanoid-v2', ] patterns = [ 'action_noise_0.1-plan_steps_1-residual_0-skip_False-target_net_residual_True-run', 'action_noise_0.1-plan_steps_1-residual_0\.2-skip_False-target_net_residual_False-run', 'MVE_3-plan_False-skip_False-run', ] labels = [ 'Dyna-DDPG', 'Res-Dyna-DDPG', 'MVE-DDPG', ] l = len(games) plt.figure(figsize=(l * 6, 5)) plt.rc('text', usetex=True) plt.tight_layout() for j, game in enumerate(games): plt.subplot(1, l, j + 1) if j == 0: label = None else: label = 'DDPG' ddpg_plot(pattern='.*mujoco-baseline/%s-%s.*' % (game, 'remark_ddpg-run'), color=0, label=label, **kwargs) for i, p in enumerate(patterns): if j == 0: label = None else: label = labels[i] ddpg_plot(pattern='.*mve-ddpg/%s-%s.*' % (game, p), color=i + 1, label=label, **kwargs) ddpg_plot(pattern='.*dyna-ddpg-main/%s-%s.*' % (game, p), color=i + 1, label=label, **kwargs) if j == 0: p = 'action_noise_0\.1-plan_steps_1-residual_0\.05-skip_False-target_net_residual_False-run' label = r'Res-Dyna-DDPG($\eta=0.05$)' ddpg_plot(pattern='.*dyna-ddpg-extra/%s-%s.*' % (game, p), color=i + 2, label=label, **kwargs) plt.title(game, fontsize=30, fontweight="bold") plt.xticks([0, int(1e6)], ['0', r'$10^6$']) plt.tick_params(axis='x', labelsize=30) plt.tick_params(axis='y', labelsize=25) plt.xlabel('Steps', fontsize=30) if not j: plt.ylabel('Episode Return', fontsize=30) if j == 1 or j == 0: plt.legend(fontsize=17, frameon=False) plt.savefig('%s/ddpg-dyna-%s.png' % (FOLDER, type), bbox_inches='tight') plt.show() def plot_mf_ddpg(type='mean'): kwargs = { 'x_interval': int(1e4), 'rep': 20, 'average': True, 'max_x_len': 101, 'top_k': 0, 'type': type, } games = [ 'dm-acrobot-swingup', 'dm-acrobot-swingup_sparse', 'dm-ball_in_cup-catch', 'dm-cartpole-swingup', 'dm-cartpole-swingup_sparse', 'dm-cartpole-balance', 'dm-cartpole-balance_sparse', 'dm-cheetah-run', 'dm-finger-turn_hard', 'dm-finger-spin', 'dm-finger-turn_easy', 'dm-fish-upright', 'dm-fish-swim', 'dm-hopper-stand', 'dm-hopper-hop', 'dm-humanoid-stand', 'dm-humanoid-walk', 'dm-humanoid-run', 'dm-manipulator-bring_ball', 'dm-pendulum-swingup', 'dm-point_mass-easy', 'dm-reacher-easy', 'dm-reacher-hard', 'dm-swimmer-swimmer15', 'dm-swimmer-swimmer6', 'dm-walker-stand', 'dm-walker-walk', 'dm-walker-run', ] patterns = [ 'remark_residual-residual_0-target_net_residual_True-run', 'remark_residual-residual_0\.05-target_net_residual_True-run', ] labels = [ 'DDPG', r'Bi-Res-DDPG($\eta=0.05$)', ] l = len(games) n_col = 4 n_row = l // n_col width = 5 plt.figure(figsize=(n_col * width, n_row * width)) plt.rc('text', usetex=True) plt.tight_layout() for j, game in 
enumerate(games): plt.subplot(n_row, n_col, j + 1) for i, p in enumerate(patterns): ddpg_plot(pattern='.*dm-residual-ddpg/%s-%s.*' % (game, p), color=i, label=labels[i], **kwargs) title = game[3:].replace('_', '\\_') plt.title(title, fontsize=30, fontweight="bold") plt.tick_params(axis='x', labelsize=30) plt.tick_params(axis='y', labelsize=20) if not j: plt.legend(fontsize=17, frameon=False) if j % n_col == 0: plt.ylabel('Episode Return', fontsize=30) if j >= l - n_col: plt.xlabel('Steps', fontsize=30) plt.xticks([0, int(1e6)], ['0', r'$10^6$']) else: plt.tick_params(labelbottom=False) plt.savefig('%s/ddpg-mf-%s.png' % (FOLDER, type), bbox_inches='tight') plt.show() def plot_rebuttal(): kwargs = { 'x_interval': int(1e4), 'rep': 20, 'average': True, 'max_x_len': 101, 'top_k': 0, } games = [ 'HalfCheetah-v2', 'Walker2d-v2', 'Hopper-v2', 'Swimmer-v2', 'Humanoid-v2', ] patterns = [ 'remark_ddpg-run', 'remark_residual-residual_0\.05-target_net_residual_True-run', ] labels = [ 'DDPG', r'Bi-Res-DDPG($\eta=0.05$)', ] l = len(games) plt.figure(figsize=(l * 6, 5)) plt.rc('text', usetex=True) plt.tight_layout() for j, game in enumerate(games): plt.subplot(1, l, j + 1) for i, p in enumerate(patterns): ddpg_plot(pattern='.*residual-ddpg/%s-%s.*' % (game, p), color=i, name=game, label=labels[i], **kwargs) ddpg_plot(pattern='.*mujoco-baseline/%s-%s.*' % (game, p), color=i, name=game, label=labels[i], **kwargs) if j == 0: plt.legend() plt.title(game, fontsize=30, fontweight="bold") plt.xticks([0, int(1e6)], ['0', r'$10^6$']) plt.tick_params(axis='x', labelsize=30) plt.tick_params(axis='y', labelsize=25) plt.xlabel('Steps', fontsize=30) if not j: plt.ylabel('Episode Return', fontsize=30) plt.savefig('%s/rebuttal.pdf' % (FOLDER), bbox_inches='tight') plt.show() def extract_auc_data_mujoco(): kwargs = { 'x_interval': int(1e4), 'rep': 20, 'average': True, 'max_x_len': 101, 'top_k': 0, } games = [ 'HalfCheetah-v2', 'Walker2d-v2', 'Hopper-v2', 'Swimmer-v2', 'Humanoid-v2', ] patterns = [ 'remark_residual-residual_0\.05-target_net_residual_True-run', 'remark_ddpg-run', ] names = [] improvements = [] for game in games: AUC = [] for i, p in enumerate(patterns): if i == 0: data = ddpg_plot(pattern='.*residual-ddpg/%s-%s.*' % (game, p), data=True, **kwargs) else: data = ddpg_plot(pattern='.*mujoco-baseline/%s-%s.*' % (game, p), data=True, **kwargs) AUC.append(data.mean(0).sum()) improvements.append((AUC[0] - AUC[1]) / AUC[1]) names.append(game) print(names[-1], improvements[-1]) with open('./data/residual/auc_mujoco.bin', 'wb') as f: pickle.dump([names, improvements], f) def plot_auc_improvements_all(): with open('./data/residual/auc.bin', 'rb') as f: dm_games, dm_improvements = pickle.load(f) with open('./data/residual/auc_mujoco.bin', 'rb') as f: mj_games, mj_improvements = pickle.load(f) games = dm_games + mj_games improvements = dm_improvements + mj_improvements indices = list(reversed(np.argsort(improvements))) games = [games[i] for i in indices] improvements = [improvements[i] for i in indices] print(np.median(improvements), np.mean(improvements)) for g, i in zip(games, improvements): print(g, i) x =
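# --- Illustrative sketch (not part of the original script) -----------------
# The AUC bookkeeping in extract_auc_data()/extract_auc_data_mujoco() above
# reduces each algorithm's learning curves to one relative-improvement number.
# A minimal, self-contained version of that metric, assuming curves shaped
# (num_runs, num_points) and using synthetic data:
import numpy as np

def auc_improvement(curves_treat, curves_base):
    # average over runs, then sum over evaluation points (discrete AUC)
    auc_treat = curves_treat.mean(axis=0).sum()
    auc_base = curves_base.mean(axis=0).sum()
    return (auc_treat - auc_base) / auc_base

rng = np.random.default_rng(0)
base = rng.uniform(0, 100, size=(5, 101))   # e.g. 5 runs, 101 checkpoints
treat = base * 1.1                          # a uniformly 10% better variant
print('AUC improvement: %.2f%%' % (100 * auc_improvement(treat, base)))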
# -*- coding: utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Parameter resolver."""

import copy
import json
import numbers
import warnings
from typing import Iterable

import numpy as np

from mindquantum import mqbackend as mb
from mindquantum.utils.f import (
    is_two_number_close,
    join_without_empty,
    string_expression,
)
from mindquantum.utils.type_value_check import _check_input_type, _check_int_type


def is_type_upgrade(origin_v, other_v):
    """Check whether adding two values upgrades the result type."""
    tmp = origin_v + other_v
    return not isinstance(tmp, type(origin_v))


class ParameterResolver:
    """
    A ParameterResolver can set the parameters of a parameterized quantum gate
    or parameterized quantum circuit.

    By specifying which parameters require gradients, the PQC operator can
    restrict gradient calculation to those parameters only.

    Args:
        data (Union[dict, numbers.Number, str, ParameterResolver]): initial
            parameter names and their values. If data is a dict, the keys are
            the parameter names and the values are the parameter values. If
            data is a number, this number is the constant part of this
            parameter resolver. If data is a string, this string is the only
            parameter, with coefficient 1. Default: None.
        const (numbers.Number): the constant part of this parameter resolver.
            Default: None.
        dtype (type): the value type of this parameter resolver.
            Default: numpy.float64.
Examples: >>> from mindquantum.core import ParameterResolver >>> pr = ParameterResolver({'a': 0.3}) >>> pr['b'] = 0.5 >>> pr.no_grad_part('a') {'a': 0.3, 'b': 0.5}, const: 0.0 >>> pr *= 2 >>> pr {'a': 0.6, 'b': 1.0}, const: 0.0 >>> pr.expression() '3/5*a + b' >>> pr.const = 0.5 >>> pr.expression() '3/5*a + b + 1/2' >>> pr.no_grad_parameters {'a'} >>> ParameterResolver(3) {}, const: 3.0 >>> ParameterResolver('a') {'a': 1.0}, const: 0.0 """ def __init__(self, data=None, const=None, dtype=None): """Initialize a ParameterResolver object.""" if dtype is None: if isinstance(data, (complex, np.complex128)): dtype = np.complex128 else: dtype = np.float64 if dtype not in (np.float64, np.complex128, float, complex): raise ValueError(f"dtype requires np.float64 or np.complex128, but get {dtype}") if dtype == float: dtype = np.float64 if dtype == complex: dtype = np.complex128 self.dtype = dtype if dtype == np.float64: obj = mb.real_pr else: obj = mb.complex_pr if data is None: data = {} if isinstance(data, numbers.Number): if const is not None: raise ValueError("data and const cannot not both be number.") const = self.dtype(data) data = {} self.obj = obj(data, const) if isinstance(data, str): if const is None: const = 0 _check_input_type('const', numbers.Number, const) self.obj = obj({data: self.dtype(1)}, self.dtype(const)) elif isinstance(data, self.__class__): self.obj = copy.copy(data.obj) dtype = self.dtype self.dtype = data.dtype self.astype(dtype, True) if const is not None: _check_input_type('const', numbers.Number, const) self.obj.set_const(self.dtype(const)) elif isinstance(data, dict): for k, v in data.items(): _check_input_type("parameter name", str, k) _check_input_type("parameter value", numbers.Number, v) if not k.strip(): raise KeyError("parameter name cannot be empty string.") if const is None: const = 0 _check_input_type("const", numbers.Number, const) const = self.dtype(const) self.obj = obj({i: self.dtype(j) for i, j in data.items()}, const) else: raise TypeError( f"data requires a number or a string or a dict or a ParameterResolver, but get {type(data)}!" ) def astype(self, dtype, inplace=False): """ Change the data type of this parameter resolver. Args: dtype (type): The type of data. inplace (bool): Whether to change the type inplace. Returns: ParameterResolver, the parameter resolver with given data type. Examples: >>> from mindquantum.core import ParameterResolver as PR >>> import numpy as np >>> pr = PR({'a': 1.0}, 2.0) >>> pr {'a': 1.0}, const: 2.0 >>> pr.astype(np.complex128, inplace=True) >>> pr {'a': (1+0j)}, const: (2+0j) """ if dtype == complex: dtype = np.complex128 if dtype == float: dtype = np.float64 if dtype not in (np.float64, np.complex128): raise ValueError(f"dtype requires np.float64 or np.complex128, but get {dtype}") _check_input_type('inplace', bool, inplace) if inplace: if dtype != self.dtype: if self.dtype == np.complex128 and dtype == np.float64: warnings.warn( "Casting complex parameter resolver to float parameter \ resolver discards the imaginary part." ) if self.obj.is_complex_pr(): self.obj = self.obj.real() else: if not self.obj.is_complex_pr(): self.obj = self.obj.to_complex() self.dtype = dtype return self new = copy.copy(self) new = new.astype(dtype, inplace=True) return new @property def const(self): """ Get the constant part of this parameter resolver. Returns: numbers.Number, the constant part of this parameter resolver. 
Examples: >>> from mindquantum.core import ParameterResolver as PR >>> pr = PR({'a': 1}, 2.5) >>> pr.const 2.5 """ return self.obj.const @const.setter def const(self, const_value): """Setter method for const.""" _check_input_type('const value', numbers.Number, const_value) if isinstance(const_value, complex): const_value = np.complex128(const_value) if is_type_upgrade(self.const, const_value): self.astype(type(const_value), inplace=True) self.obj.set_const(self.dtype(const_value)) def __len__(self): """ Get the number of parameters in this parameter resolver. Please note that the parameter with 0 coefficient is also considered. Returns: int, the number of all parameters. Examples: >>> from mindquantum.core import ParameterResolver as PR >>> a = PR({'a': 0, 'b': 1}) >>> a.expression() 'b' >>> len(a) 2 """ return len(self.obj) def keys(self): """ Yield an iterator to the name of all parameters. Examples: >>> from mindquantum.core import ParameterResolver as PR >>> a = PR({'a': 0, 'b': 1}) >>> list(a.keys()) ['a', 'b'] """ for k in range(len(self)): yield self.obj.get_key(k) def values(self): """ Yield an iterator to the value of all parameters. Examples: >>> from mindquantum.core import ParameterResolver as PR >>> a = PR({'a': 0, 'b': 1}) >>> list(a.values()) [0.0, 1.0] """ for k in self.keys(): yield self.obj[k] def items(self): """ Yield an iterator to the name and value of all parameters. Examples: >>> from mindquantum.core import ParameterResolver as PR >>> a = PR({'a': 0, 'b': 1}) >>> list(a.items()) [('a', 0.0), ('b', 1.0)] """ for i in range(len(self)): key = self.obj.get_key(i) yield (key, self.obj[key]) def is_const(self): """ Check whether this parameter resolver represents a constant number. This means that there is no non zero parameter in this parameter resolver. Returns: bool, whether this parameter resolver represent a constant number. Examples: >>> from mindquantum.core import ParameterResolver as PR >>> pr = PR(1.0) >>> pr.is_const() True """ return self.obj.is_const() def __bool__(self): """ Check whether this parameter resolver has non zero constant or parameter with non zero coefficient. Returns: bool, False if this parameter resolver represent zero and True if not. Examples: >>> from mindquantum.core import ParameterResolver as PR >>> pr = PR(0) >>> bool(pr) False """ return bool(self.obj) def __setitem__(self, keys, values): """ Set the value of parameter in this parameter resolver. You can set multiple values of multiple parameters with given iterable keys and values. Examples: >>> from mindquantum.core import ParameterResolver as PR >>> pr = PR(0) >>> pr['a'] = 2.5 >>> pr.expression() '5/2*a' """ if isinstance(keys, str): _check_input_type("parameter name", str, keys) _check_input_type("parameter value", numbers.Number, values) if not keys.strip(): raise KeyError("parameter name cannot be empty string.") if is_type_upgrade(self.dtype(0), values): self.astype(type(values), True) self.obj[keys] = self.dtype(values) elif isinstance(keys, Iterable): if not isinstance(values, Iterable): raise ValueError("Values should be iterable.") if len(values) != len(keys): raise ValueError("Size of keys and values do not match.") for k, v in zip(keys, values): self[k] = v else: raise TypeError("Parameter name should be a string, but get {}!".format(type(keys))) def __getitem__(self, key): """ Get the parameter value from this parameter resolver. Returns: numbers.Number, the parameter value. 
Examples: >>> from mindquantum.core import ParameterResolver as PR >>> pr = PR({'a': 1, 'b': 2}) >>> pr['a'] 1.0 """ _check_input_type('key', str, key) return self.obj[key] def __iter__(self): """ Yield the parameter name. Examples: >>> from mindquantum.core import ParameterResolver as PR >>> pr = PR({'a': 1, 'b': 2}) >>> list(pr) ['a', 'b'] """ for i in self.keys(): yield i def __contains__(self, key): """ Check whether the given key is in this parameter resolver or not. Examples: >>> from mindquantum.core import ParameterResolver as PR >>> pr = PR({'a': 1, 'b': 2}) >>> 'c' in pr False """ _check_input_type('key', str, key) return key in self.obj def get_cpp_obj(self): """Get the
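# --- Illustrative sketch (not part of the original module) -----------------
# A short usage example exercising the dict-like ParameterResolver API that
# the docstrings above document (construction, item assignment, `const`,
# iteration, and non-inplace dtype conversion):
import numpy as np
from mindquantum.core import ParameterResolver

pr = ParameterResolver({'a': 0.3})  # one named parameter
pr['b'] = 0.5                       # dict-style assignment
pr.const = 1.0                      # constant part
print(len(pr))                      # 2
print(list(pr.items()))             # [('a', 0.3), ('b', 0.5)]
cpr = pr.astype(np.complex128)      # returns a new, complex-valued resolver
print(cpr.const)                    # (1+0j)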
Free)", callback_data=callbackdata)] ) txtKeyboard = "Please select download location:" reply_markup = InlineKeyboardMarkup(keyboard) self.replytext( query, txtKeyboard, reply_markup, False ) else: self.sendmessage( update.effective_chat.id, context, update.effective_user.first_name, f"No paths were found, Please set them up in " f"Sonarr and radarr, " f"{update.effective_user.first_name}." ) def selectLanguage(self, update, context): if not self.isBlocked(update) and self.isGranted(update): query = update.callback_query query.answer() data = query.data.split(":") # 0:marker, 1:type of media, 2:mediaid, 3: Quality if data[1] == "serie": if self.sonarr_enabled: languages = self.sonarrNode.language_profile() profiles = self.sonarrNode.quality_profile() for p in profiles: if p.id == int(data[3]): self.logChoice(update, p.name) else: if self.radarr_enabled: pass if languages: keyboard = [] for language in languages: callbackdata = ( f"selectRootFolder:{data[1]}:{data[2]}:" f"{data[3]}:{language.id}" ) keyboard.append([InlineKeyboardButton( f"{language.name}", callback_data=callbackdata)] ) reply_markup = InlineKeyboardMarkup(keyboard) self.replytext( query, "Please select language profile:", reply_markup, False ) else: self.sendmessage( update.effective_chat.id, context, update.effective_user.first_name, f"No language profiles were found, Please set them up in " f"Sonarr, " f"{update.effective_user.first_name}." ) def selectAvailability(self, update, context): if not self.isBlocked(update) and self.isGranted(update): query = update.callback_query query.answer() data = query.data.split(":") # 0:marker, 1:type of media, 2:mediaid, 3: Quality if data[1] == "serie": if self.sonarr_enabled: return else: if self.radarr_enabled: profiles = self.sonarrNode.quality_profile() for p in profiles: if p.id == int(data[3]): self.logChoice(update, p.name) if self.availability: keyboard = [] for count, availability in enumerate(self.availability): callbackdata = ( f"selectRootFolder:{data[1]}:{data[2]}:" f"{data[3]}:{count}" ) keyboard.append([InlineKeyboardButton( f"{availability}", callback_data=callbackdata)] ) reply_markup = InlineKeyboardMarkup(keyboard) self.replytext( query, "Please select availability:", reply_markup, False ) else: self.sendmessage( update.effective_chat.id, context, update.effective_user.first_name, f"No availability was found, Please set them up in " f"INIT, " f"{update.effective_user.first_name}." 
) def showMetaInfo(self, update, context): if not self.isBlocked(update) and self.isGranted(update): query = update.callback_query query.answer() data = query.data.split(":") # 0:marker, 1:type of media, 2:title args = [] args.append(data[2]) self.findMedia(update, context, query, data[1], args) def showMediaInfo(self, update, context): if not self.isBlocked(update) and self.isGranted(update): query = update.callback_query query.answer() data = query.data.split(":") # 0:marker, 1:type of media, 2:mediaID if data[1] == "serie": if self.sonarr_enabled: media = self.sonarrNode.get_series(series_id=int(data[2])) tagLabels_to_keep = self.tags_to_keep_sonarr else: if self.radarr_enabled: media = self.radarrNode.get_movie(movie_id=int(data[2])) tagLabels_to_keep = self.tags_to_keep_radarr self.logChoice(update, f"{media.title} ({media.year})") self.outputMediaInfo(update, context, data[1], media) keyboard = [] # Get ID's for keeping movies anyway tagsIDs_to_keep = self.getIDsforTagLabels( data[1], tagLabels_to_keep) # IF in the media there are not "KEEP" tags, # then show delete button if not set(media.tagsIds) & set(tagsIDs_to_keep) or \ self.isAdmin(update): # Show button if: # - If "only users can delete own media" is enabled # and it is their own media # - If "only users can delete own media" is disabled # - User is an Admin if (self.users_can_only_delete_own_media and self.getUsertagID(update, data[1]) in media.tagsIds) \ or not self.users_can_only_delete_own_media or \ self.isAdmin(update): callbackdata = (f"deletemedia:{data[1]}:{data[2]}") if self.isAdmin(update) or \ self.users_permanent_delete_media: callbackdata += ":True" else: callbackdata += ":False" keyboard.append([InlineKeyboardButton( f"Delete '{media.title} ({media.year})'", callback_data=callbackdata)] ) tagLabels_to_extend = self.tags_to_extend_radarr tagIDs_To_Extend = self.getIDsforTagLabels( data[1], tagLabels_to_extend) if data[1] == "movie" and \ not set(media.tagsIds) & set(tagIDs_To_Extend): callbackdata = (f"extendperiod:{data[1]}:{data[2]}") keyboard.append([InlineKeyboardButton( f"Extend '{media.title} ({media.year})' " f"with {self.extend_by_days} days", callback_data=callbackdata)] ) tagLabels_to_keep = self.tags_to_keep_radarr tagIDs_To_Keep = self.getIDsforTagLabels( data[1], tagLabels_to_keep) if not set(media.tagsIds) & set(tagIDs_To_Keep) \ and self.isAdmin(update): callbackdata = (f"keepmedia:{data[1]}:{data[2]}") keyboard.append([InlineKeyboardButton( f"Keep '{media.title} ({media.year})'", callback_data=callbackdata)] ) if data[1] == "movie" and not media.hasFile: callbackdata = (f"searchmedia:{data[1]}") keyboard.append([InlineKeyboardButton( f"Search '{media.title} ({media.year})'", callback_data=callbackdata)] ) if keyboard: reply_markup = InlineKeyboardMarkup(keyboard) self.replytext( query, "Actions:", reply_markup, False ) def deleteQueueItem(self, update, context): if not self.isBlocked(update) and self.isGranted(update): query = update.callback_query query.answer() data = query.data.split(":") # 0:marker, 1:type of media, 2:queueID if data[1] == "episode": if self.sonarr_enabled: self.sonarr_node.delete_queue(int(data[2])) else: if self.radarr_enabled: self.radarr_node.delete_queue(int(data[2])) self.notifyDeleteQueueItem(update, context, data[1], data[2]) def deleteMedia(self, update, context): if not self.isBlocked(update) and self.isGranted(update): query = update.callback_query query.answer() data = query.data.split(":") # 0:marker, 1:type of media, 2:mediaID, 3:delete_files if data[1] == 
"serie": # Get ID's for exclusion list movies tagLabels_for_exclusion = \ self.sonarr_tags_exclusion tagsIDs_for_exclusion = self.getIDsforTagLabels( data[1], tagLabels_for_exclusion) media = self.sonarrNode.get_series(series_id=data[2]) if self.sonarr_enabled: self.sonarrNode.delete_series( series_id=int(data[2]), tvdb_id=None, addImportListExclusion=True if set(media.tagsIds) & set(tagsIDs_for_exclusion) else False, deleteFiles=data[3] ) else: # Get ID's for exclusion list movies tagLabels_for_exclusion = \ self.radarr_tags_exclusion tagsIDs_for_exclusion = self.getIDsforTagLabels( data[1], tagLabels_for_exclusion) media = self.radarrNode.get_movie(movie_id=data[2]) if self.radarr_enabled: self.radarrNode.delete_movie( movie_id=int(data[2]), tmdb_id=None, imdb_id=None, addImportExclusion=True if set(media.tagsIds) & set(tagsIDs_for_exclusion) else False, deleteFiles=data[3] ) self.logChoice(update, f"Delete {data[1]}") self.sendmessage( update.effective_chat.id, context, update.effective_user.first_name, f"The {data[1]} has been deleted." ) def downloadMedia(self, update, context): if not self.isBlocked(update) and self.isGranted(update): query = update.callback_query query.answer() data = query.data.split(":") # 0:marker, # 1:type of media, # 2:mediaid # 3:qualityid, # 4:Langausge Profile / Availability # 5:rootfolder # 6:Download which seasons? self.sendmessage( update.effective_chat.id, context, update.effective_user.first_name, "Please be patient..." ) if data[1] == "serie": if self.sonarr_enabled: self.logChoice(update, data[6]) media = self.sonarrNode.get_series(tvdb_id=data[2]) self.pixlovarrdata["stitle"] = media.title self.savedata( self.pixlovarr_data_file, self.pixlovarrdata) # get usertag from server and to movie usertagID = self.getUsertagID(update, data[1]) if not usertagID: tagName = self.createTagName( update.effective_user.first_name, update.effective_user.id ) newTag = self.sonarrNode.create_tag(tagName) usertagID = newTag.id tags = [] tags.append(usertagID) media.add( int(data[5]), int(data[3]), int(data[4]), data[6], self.sonarr_season_folder, True, False, "standard", tags ) self.notifyDownload( update, context, data[1], media.title, media.year) else: if self.radarr_enabled: media = self.radarrNode.get_movie(imdb_id=data[2]) self.pixlovarrdata["mtitle"] = media.title self.savedata( self.pixlovarr_data_file, self.pixlovarrdata) # get usertag from server and to movie usertagID = self.getUsertagID(update, data[1]) if not usertagID: tagName = self.createTagName( update.effective_user.first_name, update.effective_user.id ) newTag = self.radarrNode.create_tag(tagName) usertagID = newTag.id tags = [] tags.append(usertagID) availability = str(self.availability[int(data[4])]) media.add( int(data[5]), int(data[3]), True, True, availability, tags ) self.notifyDownload( update, context, data[1], media.title, media.year) def showDownloadSummary(self, update, context): if not self.isBlocked(update) and self.isGranted(update): query = update.callback_query query.answer() data = query.data.split(":") # 0:marker, 1:type of media, 2:mediaid if data[1] == "serie": if self.sonarr_enabled: profiles = self.sonarrNode.quality_profile() callbackdata = f"selectlang:{data[1]}:{data[2]}" media = self.sonarrNode.get_series(tvdb_id=data[2]) else: if self.radarr_enabled: profiles = self.radarrNode.quality_profile() callbackdata = f"selectAvailability:{data[1]}:{data[2]}" media = self.radarrNode.get_movie(imdb_id=data[2]) self.logChoice(update, f"{media.title} ({media.year})") 
self.outputMediaInfo(update, context, data[1], media) keyboard = [] row = [] num_columns = 2 if profiles: profiles.sort(key=self.sortOnNameDict) for count, p in enumerate(profiles): if data[1] == "serie": row.append(InlineKeyboardButton( f"{p.name}", callback_data=f"{callbackdata}:{p.id}") ) else: row.append(InlineKeyboardButton( f"{p.name}", callback_data=f"{callbackdata}:{p.id}") ) if (count+1) % num_columns == 0 or \ count == len(profiles)-1: keyboard.append(row) row = [] else: self.sendmessage( update.effective_chat.id, context, update.effective_user.first_name, f"No profiles were found, Please set them up in" f"Sonarr and Radarr, " f"{update.effective_user.first_name}." ) return reply_markup = InlineKeyboardMarkup(keyboard) self.replytext( query, "Please select media quality:", reply_markup, False ) def selectDownload(self, update, context): if not self.isBlocked(update) and self.isGranted(update): query = update.callback_query query.answer() data = query.data.split(":") # 0:marker, 1:type of media, 2:mediaid, 3: Quality # 4: Langauge Profile, 5: RootFolder callbackdata = ( f"downloadmedia:{data[1]}:{data[2]}:{data[3]}:" f"{data[4]}:{data[5]}" ) if data[1] == "serie": if self.sonarr_enabled: root_paths = self.sonarrNode.root_folder() for r in root_paths: if r.id == int(data[5]): self.logChoice(update, r.path) keyboard = [ [InlineKeyboardButton( "Download All seasons", callback_data=f"{callbackdata}:all")], [InlineKeyboardButton( "Download Future seasons", callback_data=f"{callbackdata}:future")], [InlineKeyboardButton( "Download missing seasons", callback_data=f"{callbackdata}:missing")], [InlineKeyboardButton( "Download existing seasons", callback_data=f"{callbackdata}:existing")], [InlineKeyboardButton( "Download only pilot episode", callback_data=f"{callbackdata}:pilot")], [InlineKeyboardButton( "Download first season", callback_data=f"{callbackdata}:firstSeason")], [InlineKeyboardButton( "Download lastest season", callback_data=f"{callbackdata}:latestSeason")], [InlineKeyboardButton( "Download no seasons", callback_data=f"{callbackdata}:none")] ] else: if self.radarr_enabled: root_paths = self.radarrNode.root_folder() for r in root_paths: if r.id == int(data[5]): self.logChoice(update, r.path) media = self.radarrNode.get_movie(imdb_id=data[2]) keyboard = [[InlineKeyboardButton( f"Download '{media.title} ({media.year})'", callback_data=f"{callbackdata}:False")]] reply_markup = InlineKeyboardMarkup(keyboard) self.replytext( query, "Please confirm:", reply_markup, False ) def findMedia(self, update, context, query, typeOfMedia, args): ranking = "" if len(args) > 0: ranking = args[0] if re.match("^[Tt]\\d+$", ranking): context.args.pop(0) if ' '.join(args): topAmount = self.getTopAmount(update, context, ranking) searchQuery = ' '.join(args) self.sendmessage( update.effective_chat.id, context, update.effective_user.first_name, f"Searching for {typeOfMedia}s..." 
) if typeOfMedia == "serie": if self.sonarr_enabled: media = self.sonarrNode.search_series(term=searchQuery) else: if self.radarr_enabled: media = self.radarrNode.search_movies(term=searchQuery) if media: keyboard = [] keyboardPresentMedia = [] maxResults = topAmount - 1 for m in media: if m.id: # Media found in database callbackdata = f"showMediaInfo:{typeOfMedia}:{m.id}" keyboardPresentMedia.append([InlineKeyboardButton( f"{m.title} ({m.year})", callback_data=callbackdata)] ) maxResults += 1 continue # media is already in collection if typeOfMedia == "serie": callbackdata = ( f"showdlsummary:{typeOfMedia}:{m.tvdbId}") if not m.tvdbId: maxResults += 1 continue # serie doesn't have ID else: callbackdata = ( f"showdlsummary:{typeOfMedia}:{m.imdbId}") if not m.imdbId: maxResults += 1 continue # movie doesn't have ID keyboard.append([InlineKeyboardButton( f"{m.title} ({m.year})", callback_data=callbackdata)] ) if media.index(m) == maxResults: break if query is not None: message = query else: message = update if keyboardPresentMedia: reply_markup_PresentMedia = InlineKeyboardMarkup( keyboardPresentMedia) self.replytext( message, f"We found these {typeOfMedia}s in your catalog:", reply_markup_PresentMedia, False ) reply_markup = InlineKeyboardMarkup(keyboard) self.replytext( message, "We found
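# --- Illustrative sketch (not part of the original bot) --------------------
# The handlers above route button presses by packing state into the
# colon-separated callback_data string and by chunking options into rows
# (see the num_columns logic in showDownloadSummary). A distilled version;
# the Profile tuple, profile list, and callback marker are illustrative
# stand-ins:
from collections import namedtuple
from telegram import InlineKeyboardButton, InlineKeyboardMarkup

Profile = namedtuple('Profile', ['id', 'name'])

def quality_keyboard(media_type, media_id, profiles, num_columns=2):
    keyboard, row = [], []
    for count, p in enumerate(profiles):
        # callback_data layout: marker:type_of_media:media_id:quality_id
        row.append(InlineKeyboardButton(
            p.name, callback_data=f"showdlsummary:{media_type}:{media_id}:{p.id}"))
        if (count + 1) % num_columns == 0 or count == len(profiles) - 1:
            keyboard.append(row)
            row = []
    return InlineKeyboardMarkup(keyboard)

markup = quality_keyboard('movie', 'tt0111161',
                          [Profile(1, 'HD-1080p'), Profile(2, 'Ultra-HD')])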
""" super(PerspectiveManager, self).__init__() self.toolbarItems = {} self.createAuiManager() pub.subscribe(self.__onObjectAdded, 'perspectiveClicked') pub.subscribe(self.__onUpdatePageText, 'onUpdatePageText') self.accel_tbl = wx.AcceleratorTable([ (wx.ACCEL_CTRL, ord('N'), ID_NEW), (wx.ACCEL_CTRL, ord('Y'), ID_REDO), (wx.ACCEL_CTRL, ord('Z'), ID_UNDO), (wx.ACCEL_CTRL, ord('C'), ID_COPY), (wx.ACCEL_CTRL, ord('V'), ID_PASTE), (wx.ACCEL_CTRL, ord('X'), ID_CUT), (wx.ACCEL_CTRL | wx.ACCEL_ALT, wx.WXK_DOWN, ID_DUPLICATE_LINE), (wx.ACCEL_CTRL, ord('S'), ID_SAVE), (wx.ACCEL_CTRL, ord('H'), ID_SEARCH_FILE), (wx.ACCEL_CTRL | wx.ACCEL_SHIFT, ord('F'), ID_FORMAT_FILE), (wx.ACCEL_CTRL | wx.ACCEL_SHIFT , ord('R'), ID_RESOURCE), (wx.ACCEL_CTRL | wx.ACCEL_SHIFT , ord('T'), ID_OPEN_TYPE), # (wx.ACCEL_CTRL, ord('V'), wx.ID_PASTE), # (wx.ACCEL_ALT, ord('X'), wx.ID_PASTE), # (wx.ACCEL_SHIFT | wx.ACCEL_ALT, ord('Y'), wx.ID_PASTE) ]) self.SetAcceleratorTable(self.accel_tbl) def __onUpdatePageText(self, filePath, extra1, extra2=None): # no longer need to access data through message.data. logger.info(f'PerspectiveManager.__onUpdatePageText: {filePath}') viewToolbar = self._mgr.GetPane("viewToolbar") print(extra1) toolSave = viewToolbar.window.FindTool(ID_SAVE) toolSaveAll = viewToolbar.window.FindTool(ID_SAVE_ALL) toolSaveAll.state = aui.AUI_BUTTON_STATE_NORMAL toolSave.state = aui.AUI_BUTTON_STATE_NORMAL logger.info(toolSave.state) self.updateTitle(title=filePath) self._mgr.Update() if extra2: print(extra2) def __onObjectAdded(self, data, extra1, extra2=None): # no longer need to access data through message.data. print('PerspectiveManager', repr(data), 'is added') print(extra1) if extra2: print(extra2) def createAuiManager(self): logger.debug('createAuiManager') # tell FrameManager to manage this frame self._mgr = MyAuiManager() self._mgr.SetManagedWindow(self) # set up default notebook style self._notebook_style = aui.AUI_NB_DEFAULT_STYLE | wx.BORDER_NONE self._notebook_theme = 1 # min size for the frame itself isn't completely done. # see the end up AuiManager.Update() for the test # code. For now, just hard code a frame minimum size self.SetMinSize(wx.Size(100, 100)) self._perspectives = [] # add a bunch of panes # self._mgr.AddPane(self.CreateSizeReportCtrl(), wx.aui.AuiPaneInfo().Name("test1").Caption("Pane Caption").Top().CloseButton(True).MaximizeButton(True)) # add the toolbars to the manager # topToolBar = wx.BoxSizer(wx.HORIZONTAL) # topToolBar.Add(self.constructToolBar(),1,wx.ALIGN_LEFT,4) # note the 2nd param 'proportion' is 1 # #topToolBar.AddStretchSpacer() # topToolBar.Add(self.constructToolBar(),0,wx.ALIGN_RIGHT,4) self._mgr.AddPane(self.constructViewToolBar(), aui.AuiPaneInfo(). Name("viewToolbar").Caption("View Toolbar"). ToolbarPane().Top().Row(1).Position(1).CloseButton(True). LeftDockable(False).RightDockable(False).Gripper(True)) self._mgr.AddPane(self.constructPerspectiveToolBar(), aui.AuiPaneInfo(). Name("perspectiveToolbar").Caption("Perspective Toolbar"). ToolbarPane().Top().Row(1).Position(1).CloseButton(True). LeftDockable(False).RightDockable(False).Gripper(True), self.definePoint()) # self._mgr.AddPane(self.creatingFileExplorer(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="file_explorer.png")).BestSize(500, -1). # Name("fileExplorer").Caption("File Explorer").Dockable(True).Movable(True).MinSize(500, -1).Resizable(True). 
# Left().Layer(1).Position(2).CloseButton(True).MaximizeButton(True).MinimizeButton(True)) # self._mgr.AddPane(self.creatingTreeCtrl(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="folder_database.png")).BestSize(500, -1). # Name("databaseNaviagor").Caption("Database Navigator").Dockable(True).Movable(True).MinSize(500, -1). # Left().Layer(1).Position(1).CloseButton(True).MaximizeButton(True).MinimizeButton(True), target=self._mgr.GetPane("fileExplorer")) self._mgr.AddPane(WelcomePanel(self), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="welcome16.png")).BestSize(500, -1). Name("onWelcome").Caption("Welcome").Dockable(True).Movable(True).MinSize(500, -1).CaptionVisible(visible=True).Direction(wx.TOP). Center().Layer(0).Position(0).CloseButton(True).MaximizeButton(True).MinimizeButton(True)) # self._mgr.AddPane(wx.Panel(self), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="variable_view.png")).BestSize(500, -1). # Name("variableView").Caption("Variable").Dockable(True).Movable(True).MinSize(500, -1).CaptionVisible(visible=True).Direction(wx.TOP). # Right().Layer(0).Position(0).CloseButton(True).MaximizeButton(True).MinimizeButton(True)) # self._mgr.AddPane(self.constructCenterPane(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="script.png")). # Name("centerPane").Caption("Center Pane").LeftDockable(True).Direction(wx.TOP). # Center().Layer(0).Position(0).CloseButton(True).MaximizeButton(True).MinimizeButton(True).CaptionVisible(visible=True), target=self._mgr.GetPane("onWelcome")) # self._mgr.AddPane(self.getWorksheet(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="script.png")). # Name("Worksheet-0").Caption("Worksheet-0").LeftDockable(True).Direction(wx.TOP). # Center().Layer(0).Position(0).CloseButton(True).MaximizeButton(True).MinimizeButton(True).CaptionVisible(visible=True), target=self._mgr.GetPane("onWelcome")) # self._mgr.AddPane(self.constructSchemaViewerPane(), aui.AuiPaneInfo().Icon(wx.Bitmap(os.path.join(path, "script.png"))). # Name("schemaViewer").Caption("Schema Viewer").LeftDockable(True). # Center().CloseButton(True).MaximizeButton(True).MinimizeButton(True)) # self._mgr.AddPane(self.constructSchemaViewerPane(), aui.AuiPaneInfo(). # Name("test9").Caption("Min Size 200x100"). # BestSize(wx.Size(200, 100)).MinSize(wx.Size(200, 100)). # Bottom().Layer(1).CloseButton(True).MaximizeButton(True)) # self._mgr.AddPane(self.sqlConsoleOutputPane(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="console_view.png")). # Name("consoleOutput").Caption("Console").Dockable(True).Movable(True).LeftDockable(True).BestSize(wx.Size(500, 400)).MinSize(wx.Size(500, 400)). # Bottom().Layer(0).Row(1).CloseButton(True).MaximizeButton(visible=True).MinimizeButton(visible=True).PinButton(visible=True).GripperTop()) # self._mgr.AddPane(self.constructHistoryPane(), aui.AuiPaneInfo().Icon(self.fileOperations.getImageBitmap(imageName="sql.png")). # Name("sqlLog").Caption("SQL Log").Dockable(True).BestSize(wx.Size(500, 400)).MinSize(wx.Size(500, 400)). 
# Bottom().Layer(0).Row(1).CloseButton(True).MaximizeButton(visible=True).MinimizeButton(visible=True), target=self._mgr.GetPane("consoleOutput")) self._mgr.GetPane("onWelcome").Show() viewToolbar = self._mgr.GetPane("viewToolbar") viewToolbar.Show() self._mgr.GetPane("variableView").Show() perspectiveToolbar = self._mgr.GetPane("perspectiveToolbar") perspectiveToolbar.dock_row = viewToolbar.dock_row perspectiveToolbar.Show() self.perspective_default = self._mgr.SavePerspective() perspective_all = self._mgr.SavePerspective() self.setStyleToPanes() all_panes = self._mgr.GetAllPanes() # "commit" all changes made to FrameManager self._mgr.Update() # some more event self.Bind(aui.EVT_AUI_PANE_CLOSE, self.OnPaneClose) self.Bind(aui.EVT_AUINOTEBOOK_ALLOW_DND, self.OnAllowNotebookDnD) self.Bind(aui.EVT_AUINOTEBOOK_PAGE_CLOSE, self.OnNotebookPageClose) self.Bind(aui.EVT_AUI_PANE_FLOATING, self.OnFloatDock) self.Bind(aui.EVT_AUI_PANE_FLOATED, self.OnFloatDock) self.Bind(aui.EVT_AUI_PANE_DOCKING, self.OnFloatDock) self.Bind(aui.EVT_AUI_PANE_DOCKED, self.OnFloatDock) self.Bind(wx.EVT_CLOSE, self.OnClose) self.Bind(wx.EVT_TIMER, self.TimerHandler) self.timer = wx.Timer(self) self.timer.Start(100) ####################################################################################### def definePoint(self): ''' right align toolbar ''' managed_window = self._mgr.GetManagedWindow() wnd_pos = managed_window.GetPosition() (x, y) = wnd_size = managed_window.GetSize() point = wx.Point(x - ((len(self.perspectiveList) - 1) * 32) + 5, 0) return point def OnPaneClose(self, event): logger.debug("OnPaneClose") # if event.pane.name == "test10": # msg = "Are you sure you want to " # if event.GetEventType() == aui.wxEVT_AUI_PANE_MINIMIZE: # msg += "minimize " # else: # msg += "close/hide " # # res = wx.MessageBox(msg + "this pane?", "AUI", wx.YES_NO, self) # if res != wx.YES: # event.Veto() def OnAllowNotebookDnD(self, event): # for the purpose of this test application, explicitly # allow all noteboko drag and drop events event.Allow() def OnNotebookPageClose(self, event): logger.debug("OnNotebookPageClose") ctrl = event.GetEventObject() # if isinstance(ctrl.GetPage(event.GetSelection()), wx.html.HtmlWindow): # # res = wx.MessageBox("Are you sure you want to close/hide this notebook page?", # "AUI", wx.YES_NO, self) # if res != wx.YES: # event.Veto() def OnFloatDock(self, event): paneLabel = event.pane.caption etype = event.GetEventType() strs = "Pane %s " % paneLabel if etype == aui.wxEVT_AUI_PANE_FLOATING: strs += "is about to be floated" if event.pane.name == "test8" and self._veto_tree: event.Veto() strs += "... Event vetoed by user selection!" logger.debug(strs) return elif etype == aui.wxEVT_AUI_PANE_FLOATED: strs += "has been floated" elif etype == aui.wxEVT_AUI_PANE_DOCKING: strs += "is about to be docked" if event.pane.name == "test11" and self._veto_text: event.Veto() strs += "... Event vetoed by user selection!" 
logger.debug(strs) return elif etype == aui.wxEVT_AUI_PANE_DOCKED: strs += "has been docked" logger.debug(strs) def __del__(self): self.timer.Stop() def OnClose(self, event): self.timer.Stop() self._mgr.UnInit() event.Skip() def TimerHandler(self, event): try: self.gauge.Pulse() except: self.timer.Stop() ####################################################################################### def setStyleToPanes(self): all_panes = self._mgr.GetAllPanes() for pane in all_panes: if isinstance(pane.window, aui.AuiNotebook): nb = pane.window nb.SetAGWWindowStyleFlag(self._notebook_style) nb.SetArtProvider(aui.ChromeTabArt()) nb.Refresh() nb.Update() def constructPerspectiveToolBar(self): # tb1 = aui.AuiToolBar(self, -1, agwStyle=aui.AUI_TB_DEFAULT_STYLE | wx.NO_BORDER) tb1 = EclipseAuiToolbar(self) self.perspectiveList = [ [ID_OTHER_PERSPECTIVE, "Open Perspective", 'new_persp.png', 'Open Perspective', None], [], [ID_JAVA_PERSPECTIVE, "Java", 'jperspective.png', 'Java', self.onPerspeciveSelection], [ID_JAVA_EE_PERSPECTIVE, "Java EE", 'javaee_perspective.png', 'Java EE', self.onPerspeciveSelection], [ID_DEBUG_PERSPECTIVE, "Debug", 'debug_persp.png', 'Debug', self.onPerspeciveSelection], [ID_PYTHON_PERSPECTIVE, "Python", 'python_perspective.png', 'Python', self.onPerspeciveSelection], [ID_DATABASE_PERSPECTIVE, "Database", 'database.png', 'Database', self.onPerspeciveSelection], [ID_GIT_PERSPECTIVE, "Git", 'gitrepository.png', 'Git', self.onPerspeciveSelection], [ID_RESOURCE_PERSPECTIVE, "Resources", 'resource_persp.png', 'Resources', self.onPerspeciveSelection], [ID_CALIBRE_PERSPECTIVE, "Calibre", 'vl_16.png', 'Calibre', self.onPerspeciveSelection], ] for perspectiveName in self.perspectiveList: if len(perspectiveName) > 1: toolBarItem = tb1.AddSimpleTool(perspectiveName[0], perspectiveName[1], self.fileOperations.getImageBitmap(imageName=perspectiveName[2]), short_help_string=perspectiveName[3]) if perspectiveName[4]: self.Bind(wx.EVT_MENU, perspectiveName[4], id=perspectiveName[0]) if toolBarItem.label == 'Python': self.selectedPerspectiveName = 'python' tb1.SetPressedItem(toolBarItem) else: tb1.AddSeparator() return tb1 # def onOpenPerspecitve(self, event): # logger.debug('onOpenPerspecitve') def selectItem(self, id=None): perspectiveToolbar = self._mgr.GetPane("perspectiveToolbar") item = perspectiveToolbar.window.getToolBarItemById(id) perspectiveToolbar.window.EnableTool(item, True) # def hideTools(self,viewToolbar.window, perspectiveName): # pass def viewToolBarByPerspective(self, perspectiveName): viewToolbar = self._mgr.GetPane("viewToolbar") # viewToolbar.window.DeleteTool(wx.ID_PREFERENCES) self.constructViewToolBar(viewToolbar.window, perspectiveName) s = viewToolbar.window.GetMinSize() viewToolbar.BestSize(s) allowedInstanceForProspective = [ # SqlConsoleOutputPanel, py.shell.Shell, PythonExplorerPanel, DataSourcePanel, CreatingJavaExplorerPanel, FileBrowser, ] if self.selectedPerspectiveName == 'database': allowedInstanceForProspective.remove(DataSourcePanel) elif self.selectedPerspectiveName == 'python': allowedInstanceForProspective.remove(PythonExplorerPanel) allowedInstanceForProspective.remove(py.shell.Shell) elif self.selectedPerspectiveName == 'java': allowedInstanceForProspective.remove(CreatingJavaExplorerPanel) elif self.selectedPerspectiveName == 'resource': allowedInstanceForProspective.remove(FileBrowser) elif self.selectedPerspectiveName == 'java': allowedInstanceForProspective.remove(CreatingJavaExplorerPanel) elif self.selectedPerspectiveName == 'git': 
allowedInstanceForProspective.remove(CreatingJavaExplorerPanel) # for pane in self._mgr.GetAllPanes(): # if pane.window: # for instance in allowedInstanceForProspective : # if isinstance(pane.window, instance): # self._mgr.ClosePane(pane) # pane.window.Destroy() # pane.DestroyOnClose(True) if self.selectedPerspectiveName == 'database': self.openPanel(name="consoleOutput", imageName="console_view.png", captionName="Console", tabDirection=3) self.openPanel(name="databaseNaviagor", imageName="folder_database.png", captionName="Database Navigator", tabDirection=4) elif self.selectedPerspectiveName == 'python': self.openPanel(name="consoleOutput", imageName="console_view.png", captionName="Console", tabDirection=3) self.openPanel(name="pythonShellView", imageName="shell.png", captionName="Python Shell", tabDirection=3) self.openPanel(name="pythonPackageExplorer", imageName="package_explorer.png", captionName="Python Package Explorer", tabDirection=4) elif self.selectedPerspectiveName == 'resource': self.openPanel(name="consoleOutput", imageName="console_view.png", captionName="Console", tabDirection=3) self.openPanel(name="fileExplorer", imageName="file_explorer.png", captionName="File Explorer", tabDirection=4) elif self.selectedPerspectiveName == 'java': self.openPanel(name="consoleOutput", imageName="console_view.png", captionName="Console", tabDirection=3) self.openPanel(name="javaPackageExplorer", imageName="package_explorer.png", captionName="Java Package Explorer", tabDirection=4) elif self.selectedPerspectiveName == 'calibre': self.openPanel(name="bookBrowser", imageName="library-16.png", captionName="Book Browser", tabDirection=5) self.openPanel(name="bookExplorer", imageName="package_explorer.png", captionName="Book Explorer", tabDirection=4) # else: # databaseNaviagorPane = self._mgr.GetPane("databaseNaviagor") # databaseNaviagorPane.Show(False) for pane in self._mgr.GetAllPanes(): if pane.window: for instance in allowedInstanceForProspective : if isinstance(pane.window, instance): self._mgr.ClosePane(pane) for pane in self._mgr.GetAllPanes(): if pane.window: logger.debug(f'pane.window:{pane.window}, pane.window.IsShown():{pane.window.IsShown()}') self.appendSubMenu(menuBar=self.GetMenuBar(), selectedPerspectiveName=self.selectedPerspectiveName) self._mgr.Update() print('viewToolBarByPerspective') # def openPanel(self, name="consoleOutput", imageName="console_view.png", captionName="Console", tabDirection=3): # # name="consoleOutput" # pane = self._mgr.GetPane(name) # panel = wx.Panel(self) # if pane.window == None: # if name == "consoleOutput": # panel = SqlConsoleOutputPanel(self) # elif name == "databaseNaviagor": # panel = DataSourcePanel(self) # elif name == "pythonPackageExplorer": # panel = CreatingPythonExplorerPanel(self) # elif name == "projectExplorerView": # panel = CreatingProjectExplorerPanel(self) # elif name == "javaPackageExplorer": # panel = CreatingJavaExplorerPanel(self) # elif name == "pythonShellView": # intro = f'{py.version.VERSION}' # panel = py.shell.Shell(self, -1, introText=intro) # elif name == "terminalView": # panel = CreatingPythonExplorerPanel(self) # elif name == "navigatorView": # panel = CreatingPythonExplorerPanel(self) # elif name == "tasksView": # panel = CreatingPythonExplorerPanel(self) # elif name == "fileExplorer": # panel = FileBrowser(self, size=(500, 300)) # elif name == "bookExplorer": # panel = BookExplorerPanel(self, size=(500, 300)) # # self._mgr.addTabByWindow(panel, imageName=imageName, name=name , captionName=captionName, 
tabDirection=tabDirection) # elif not pane.IsShown(): # pane.dock_direction = tabDirection # window = pane.window # if window: # window.Show() # pane.Show(True) # # item.state=4 def onPerspeciveSelection(self, event): logger.debug('onPerspeciveSelection') # pub.sendMessage('perspectiveClicked', data=42, extra1='onJavaPerspective') self.selectItem(event.Id) if event.Id == ID_CALIBRE_PERSPECTIVE: self.selectedPerspectiveName = 'calibre' self.viewToolBarByPerspective(self.selectedPerspectiveName) if event.Id == ID_JAVA_PERSPECTIVE: self.selectedPerspectiveName = 'java' self.viewToolBarByPerspective(self.selectedPerspectiveName) elif event.Id == ID_JAVA_EE_PERSPECTIVE: self.selectedPerspectiveName = 'java ee' self.viewToolBarByPerspective(self.selectedPerspectiveName) elif event.Id == ID_DEBUG_PERSPECTIVE: self.selectedPerspectiveName = 'debug' self.viewToolBarByPerspective(self.selectedPerspectiveName) elif event.Id == ID_PYTHON_PERSPECTIVE: self.selectedPerspectiveName = 'python' self.viewToolBarByPerspective(self.selectedPerspectiveName) elif event.Id == ID_DATABASE_PERSPECTIVE: self.selectedPerspectiveName = 'database' self.viewToolBarByPerspective(self.selectedPerspectiveName) elif event.Id == ID_GIT_PERSPECTIVE: self.selectedPerspectiveName = 'git' self.viewToolBarByPerspective(self.selectedPerspectiveName) elif event.Id == ID_RESOURCE_PERSPECTIVE: self.selectedPerspectiveName = 'resource' self.viewToolBarByPerspective(self.selectedPerspectiveName) def constructViewToolBar(self, toobar=None, perspectiveName='python'): # create some toolbars # tb1 = aui.AuiToolBar(self, -1, agwStyle=aui.AUI_TB_DEFAULT_STYLE | wx.NO_BORDER) if toobar == None: self._ctrl = None toobar = EclipseAuiToolbar(self) # id, leble, imageName, lebel, method,setToolDropdown , list of perspective, initial state(disable/enable ), kind=wx.ITEM_CHECK tools = [ (ID_NEW, "New", "new_con.png", 'New', self.onNewMenu, True, ['resource', 'python', 'java', 'debug', 'java ee'], True, wx.ITEM_NORMAL), (), (ID_SAVE, "Save (Ctrl+S)", "save.png", 'Save (Ctrl+S)', self.onSave, False, ['resource', 'python', 'java', 'debug', 'java ee', 'database'], False, wx.ITEM_NORMAL), (ID_SAVE_ALL, "Save All (Ctrl+Shift+S)", "saveall_edit.png", 'Save All (Ctrl+Shift+S)', self.onSaveAll, False, ['resource', 'python', 'java', 'debug', 'java ee', 'database'], False, wx.ITEM_NORMAL), (ID_BUILD_ALL, "Build All (Ctrl+B)", "build_exec.png", "Build All (Ctrl+B)", None, False, [ 'python', 'java', 'java ee'], True, wx.ITEM_NORMAL), (ID_TERMINAL, "Open a Terminal", "linux_terminal.png", "Open a Terminal (Ctrl+Shift+Alt+T)", self.onOpenTerminal, False, ['resource', 'python', 'java', 'debug', 'java ee'], True, wx.ITEM_NORMAL), (), (ID_SKIP_ALL_BREAKPOINTS, "Skip All Breakpoints (Ctrl+Alt+B)", "skip_brkp.png", "Skip All Breakpoints (Ctrl+Alt+B)", self.onSkipAllBreakPoints, False, ['resource', 'python', 'java', 'debug', 'java ee'], True, wx.ITEM_CHECK), (ID_NEW_JAVA_PACKAGE, "New Java Package", "newpack_wiz.png", "New Java Package", self.onOpenTerminal, False, ['resource', 'java'], True, wx.ITEM_NORMAL), (ID_NEW_JAVA_CLASS, "New Java Class", "newclass_wiz.png", "New Java
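# --- Illustrative sketch (not part of the original frame) ------------------
# The perspective switching above is built on AuiManager's ability to
# serialize the whole pane layout to a string (SavePerspective) and apply it
# back later (LoadPerspective). A minimal standalone example; the frame and
# pane names are illustrative:
import wx
import wx.lib.agw.aui as aui

app = wx.App(False)
frame = wx.Frame(None, title='Perspective demo', size=(600, 400))
mgr = aui.AuiManager()
mgr.SetManagedWindow(frame)
mgr.AddPane(wx.Panel(frame), aui.AuiPaneInfo().Name('center').CenterPane())
mgr.AddPane(wx.Panel(frame), aui.AuiPaneInfo().Name('explorer').Caption('Explorer').Left())
mgr.Update()

default_layout = mgr.SavePerspective()  # snapshot of the current layout
mgr.GetPane('explorer').Hide()          # the user closes a pane...
mgr.Update()
mgr.LoadPerspective(default_layout)     # ...and the snapshot restores it
frame.Show()
app.MainLoop()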
import struct import curses from time import sleep from threading import Timer import pickle from .exceptions import * import fingerpi as fp class RepeatingTimer(object): def __init__(self, interval, f, *args, **kwargs): self.interval = interval self._f = f self.args = args self.kwargs = kwargs self.timer = None def callback(self): self._f(*self.args, **self.kwargs) self.start() def cancel(self): self.timer.cancel() def start(self): self.timer = Timer(self.interval, self.callback) self.timer.start() port = '/dev/ttyAMA0' MENU = "menu" COMMAND = "command" EXITMENU = "exitmenu" BAUDRATES = [9600, # 14400, 19200, # 28800, 38400, # 56000, 57600, 115200] ## NOTE: `curses.window` is passed as the first argument to every function! menu_data = { 'title': "GT-511C3 UART", 'type': MENU, 'subtitle': "Please select an option...", 'options':[ { 'title': "Initialize", 'type': COMMAND, 'command': 'Initialize', 'kwargs':{} }, { 'title': "Open", 'type': COMMAND, 'command': 'Open', 'kwargs':{} }, { 'title': "Change Baudrate", 'type': MENU, 'subtitle': 'Please select and option...', 'options': [ { 'title': str(x), 'type': COMMAND, 'command': 'ChangeBaudrate', 'kwargs': {'baudrate': x} } for x in BAUDRATES ]}, { 'title': "Blink", 'type': COMMAND, 'command': 'Blink', 'kwargs':{} }, { 'title': "Enroll Sequence", 'type': COMMAND, 'command': '', 'kwargs':{} }, { 'title': "All Commands", 'type': MENU, 'subtitle': "Please select an option...", 'options': [ { 'title': "Open", 'type': COMMAND, 'command': 'Open', 'kwargs':{} }, { 'title': "Close", 'type': COMMAND, 'command': 'Close', 'kwargs':{} }, { 'title': "USB Internal Check", 'type': COMMAND, 'command': 'UsbInternalCheck', 'kwargs':{} }, { 'title': "LED on/off", 'type': COMMAND, 'command': 'CmosLed', 'kwargs':{} }, { 'title': "Get Enroll Count", 'type': COMMAND, 'command': 'GetEnrollCount', 'kwargs':{} }, { 'title': "Check Enrolled", 'type': COMMAND, 'command': 'CheckEnrolled', 'kwargs':{} }, { 'title': "Start Enrollment", 'type': COMMAND, 'command': 'EnrollStart', 'kwargs':{} }, { 'title': "Is Finger Pressed?", 'type': COMMAND, 'command': 'IsPressFinger', 'kwargs':{} }, { 'title': "Get Image", 'type': COMMAND, 'command': 'GetImage', 'kwargs':{} }, ]}, ] } class Commands(): ## Every method has to return `status` array of size 2 def __init__(self): self._f = None self.status = 'Uninitialized...' 
        self._led = None
        self.open = False
        self._status_template = r'%s; Baudrate: %s; Firmware ver.: %s; Serial #: %s'
        self._baudrate = 'N/A'
        self._firmware = 'N/A'
        self._serial_no = 'N/A'

    def _update_status(self):
        if self.open:
            __status = 'Open'
        else:
            __status = 'Closed'
        self.status = self._status_template % (
            __status,
            str(self._baudrate),
            str(self._firmware),
            str(self._serial_no)
        )

    def Initialize(self, *args, **kwargs):
        if self._f is not None:
            raise AlreadyInitializedError('This device is already initialized')
        try:
            self._f = fp.FingerPi(port=port)
        except IOError as e:
            raise PortError(str(e))
        # self._status = 'Initialized' # Change that to `closed`
        self._update_status()
        return [None, None]

    def Open(self, *args, **kwargs):
        if self.open:
            raise AlreadyOpenError('This device is already open')
        if self._f is None:
            raise NotInitializedError('Please, initialize first!')
        # self._f.serial.reset_input_buffer()
        response = self._f.Open(extra_info=True, check_baudrate=True)
        if response[0]['ACK']:
            data = struct.unpack('II16B', response[1]['Data'])
            self._baudrate = response[0]['Parameter']
            self._firmware = data[0]
            # 16-byte serial number rendered as a hex string
            self._serial_no = bytearray(data[2:]).hex()
            self.open = True
            # Show the default status iff NOT initialized!
            self._update_status()
        else:
            raise NackError(response[0]['Parameter'])
        return [None, None]

    def Blink(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        screen = args[0]
        y, x = screen.getmaxyx()
        screen.border(0)
        screen.addstr(0, 1, 'Press any button to stop...'[:x-2], curses.A_STANDOUT)
        t = RepeatingTimer(0.5, self.CmosLed, screen)
        t.start()
        screen.refresh()
        inp = screen.getch()
        if inp:
            t.cancel()
            self.CmosLed(led=False)
            self._led = False
        return ['', None]

    ####################################################################
    ## All (other) commands:

    def Close(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        response = self._f.Close()
        if not response[0]['ACK']:
            raise NackError(response[0]['Parameter'])
        self.open = False
        self._update_status()
        return [None, None]

    def UsbInternalCheck(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        response = self._f.UsbInternalCheck()
        if response[0]['ACK']:
            return ['USB Internal Check returned: ' + str(response[0]['Parameter']), None]
        else:
            raise NackError(response[0]['Parameter'])

    def CmosLed(self, *args, **kwargs):
        # Need screen for popup window
        # Several modes of operation:
        # 1) If no argument is given - toggle LED
        # 2) If named boolean argument `led` is given - set the led to specified value
        # 3) If positional argument is given - don't return the result,
        #    show the result on a separate curses.window
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        if self._led is None:
            self._led = True
        else:
            self._led = not self._led
        if kwargs.get('led', None) is not None:
            self._led = kwargs['led']
        response = self._f.CmosLed(self._led)
        # response = [{'ACK': True}]
        if response[0]['ACK']:
            if len(args) > 0:
                # Screen is given, show a message
                args[0].addstr(2, 2, 'LED is set to ' + (' ON' if self._led else 'OFF'))
                args[0].refresh()
                return ['', None]
            else:
                # Screen is not given, return the message
                return ['LED is set to ' + ('ON' if self._led else 'OFF'), None]
        else:
            raise NackError(response[0]['Parameter'])

    def ChangeBaudrate(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        rate = int(kwargs['baudrate'])
        if not (9600 <= rate <= 115200):
            raise ValueError('Incorrect baudrate: ' + str(rate))
        response = self._f.ChangeBaudrate(rate)
        if response[0]['ACK']:
            self._baudrate = str(rate)
            self._update_status()
            return [None, None]
        else:
            self.open = False
            self._baudrate = 'Unknown'
            self._update_status()
            raise NackError("Couldn't change baudrate: " + str(response[0]['Parameter']))

    def GetEnrollCount(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        response = self._f.GetEnrollCount()
        if response[0]['ACK']:
            return ['Number of enrolled fingerprints: ' + str(response[0]['Parameter']), None]
        else:
            raise NackError(response[0]['Parameter'])

    def CheckEnrolled(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        screen = args[0]
        y, x = screen.getmaxyx()
        # screen.border(0)
        # screen.addstr(0, 1, 'Enter the ID to check, or empty field to exit...'[:x-2], curses.A_STANDOUT)
        curses.echo()
        while True:
            screen.addstr(2, 2, '>>> ')
            screen.clrtoeol()
            screen.border(0)
            screen.addstr(0, 1, 'Enter the ID to check, or empty field to exit...'[:x-2], curses.A_STANDOUT)
            ID = screen.getstr(2, 6).decode()  # getstr() returns bytes
            if ID.isdigit():
                response = self._f.CheckEnrolled(int(ID))
                if response[0]['ACK']:
                    screen.addstr(3, 2, 'ID in use!')
                    screen.clrtoeol()
                else:
                    screen.addstr(3, 2, response[0]['Parameter'])
                    screen.clrtoeol()
            elif ID.isalnum():
                curses.noecho()
                raise ValueError('Non-numeric value found!')
            else:
                break
        curses.noecho()
        return [None, None]

    def IsPressFinger(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        response = self._f.IsPressFinger()
        if response[0]['ACK']:
            if response[0]['Parameter'] == 0:
                # Finger is pressed
                return [True, None]
            else:
                return [False, None]
        else:
            raise NackError(response[0]['Parameter'])

    def EnrollStart(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        screen = args[0]
        y, x = screen.getmaxyx()
        curses.echo()
        ret = [False, None]
        while True:
            screen.addstr(2, 2, '>>> ')
            screen.clrtoeol()
            screen.border(0)
            screen.addstr(0, 1, 'Enter a new ID for enrollment, or empty field to cancel...'[:x-2], curses.A_STANDOUT)
            ID = screen.getstr(2, 6).decode()  # getstr() returns bytes
            if ID.isdigit():
                response = self._f.EnrollStart(int(ID))
                if response[0]['ACK']:
                    ret[0] = 'Enrollment of ID {0:d} started'.format(response[0]['Parameter'])
                    break
                else:
                    screen.addstr(3, 2, response[0]['Parameter'])
                    screen.clrtoeol()
            elif ID.isalnum():
                curses.noecho()
                raise ValueError('Non-numeric value found!')
            else:
                break
        curses.noecho()
        return ret

    def Enroll1(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        response = self._f.Enroll1()
        if not response[0]['ACK']:
            if response[0]['Parameter'] in errors:
                err = response[0]['Parameter']
            else:
                err = 'Duplicate ID: ' + str(response[0]['Parameter'])
            raise NackError(err)
        return [None, None]

    def Enroll2(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        response = self._f.Enroll2()
        if not response[0]['ACK']:
            if response[0]['Parameter'] in errors:
                err = response[0]['Parameter']
            else:
                err = 'Duplicate ID: ' + str(response[0]['Parameter'])
            raise NackError(err)
        return [None, None]

    def Enroll3(self, *args, **kwargs):
        if not self.open:
            raise NotOpenError('Please, open the port first!')
        response = self._f.Enroll3()
        if not response[0]['ACK']:
            if response[0]['Parameter'] in errors:
                err = response[0]['Parameter']
            else:
'Duplicate ID: ' + str(response[0]['ACK']) raise NackError(err) if self._f.save: return [str(len(response[1]['Data'])) + ' bytes received... And purged!', None] return [None, None] def DeleteID(self, *args, **kwargs): if not self.open: raise NotOpenError('Please, open the port first!') screen = args[0] y, x = screen.getmaxyx() # screen.border(0) # screen.addstr(0, 1, 'Enter the ID to check, or empty field to exit...'[:x-2], curses.A_STANDOUT) curses.echo() ret = [False, None] while True: screen.addstr(2, 2, '>>> ') screen.clrtoeol() screen.border(0) screen.addstr(0, 1, 'Enter an ID to delete, or empty field to cancel...'[:x-2], curses.A_STANDOUT) ID = screen.getstr(2, 6) if ID.isdigit(): response = self._f.DeleteID(int(ID)) if response[0]['ACK']: # screen.addstr(3, 2, 'ID in use!') # screen.clrtoeol() ret[0] = 'ID {0:d} deleted'.format(ID) break else: screen.addstr(3, 2, response[0]['Parameter']) screen.clrtoeol() elif ID.isalnum(): curses.noecho() raise ValueError('Non-numeric value found!') else: break curses.noecho() return ret def DeleteAll(self, *args, **kwargs): if not
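The Blink method above depends on a RepeatingTimer helper that is not part of this excerpt; only its call site (RepeatingTimer(0.5, self.CmosLed, screen), .start(), .cancel()) is visible. A minimal sketch of such a helper built on threading.Timer; the class name and call signature come from the call site, everything else is an assumption:

import threading

class RepeatingTimer:
    """Call `function(*args)` every `interval` seconds until cancelled."""
    def __init__(self, interval, function, *args):
        self.interval = interval
        self.function = function
        self.args = args
        self._timer = None
        self._cancelled = False

    def _run(self):
        if self._cancelled:
            return
        self.function(*self.args)
        self._schedule()  # re-arm for the next tick

    def _schedule(self):
        self._timer = threading.Timer(self.interval, self._run)
        self._timer.daemon = True  # don't keep the process alive on exit
        self._timer.start()

    def start(self):
        self._schedule()

    def cancel(self):
        self._cancelled = True
        if self._timer is not None:
            self._timer.cancel()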
key) if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]): core.quit() # check if all components have finished if not continueRoutine: # a component has requested a forced-end of Routine break continueRoutine = False # will revert to True if at least one component still running for thisComponent in set_sessionComponents: if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: continueRoutine = True break # at least one component has not yet finished # refresh the screen if continueRoutine: # don't flip if this routine is over or we'll get a blank screen win.flip() # -------Ending Routine "set_session"------- for thisComponent in set_sessionComponents: if hasattr(thisComponent, "setAutoDraw"): thisComponent.setAutoDraw(False) # the Routine "set_session" was not non-slip safe, so reset the non-slip timer routineTimer.reset() # ------Prepare to start Routine "Instr_Staircase"------- continueRoutine = True # update component parameters for each repeat key_resp_5.keys = [] key_resp_5.rt = [] _key_resp_5_allKeys = [] # keep track of which components have finished Instr_StaircaseComponents = [Instr_staircase_txt, key_resp_5] for thisComponent in Instr_StaircaseComponents: thisComponent.tStart = None thisComponent.tStop = None thisComponent.tStartRefresh = None thisComponent.tStopRefresh = None if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED # reset timers t = 0 _timeToFirstFrame = win.getFutureFlipTime(clock="now") Instr_StaircaseClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip frameN = -1 # -------Run Routine "Instr_Staircase"------- while continueRoutine: # get current time t = Instr_StaircaseClock.getTime() tThisFlip = win.getFutureFlipTime(clock=Instr_StaircaseClock) tThisFlipGlobal = win.getFutureFlipTime(clock=None) frameN = frameN + 1 # number of completed frames (so 0 is the first frame) # update/draw components on each frame # *Instr_staircase_txt* updates if Instr_staircase_txt.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance: # keep track of start time/frame for later Instr_staircase_txt.frameNStart = frameN # exact frame index Instr_staircase_txt.tStart = t # local t and not account for scr refresh Instr_staircase_txt.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(Instr_staircase_txt, 'tStartRefresh') # time at next scr refresh Instr_staircase_txt.setAutoDraw(True) # *key_resp_5* updates waitOnFlip = False if key_resp_5.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance: # keep track of start time/frame for later key_resp_5.frameNStart = frameN # exact frame index key_resp_5.tStart = t # local t and not account for scr refresh key_resp_5.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(key_resp_5, 'tStartRefresh') # time at next scr refresh key_resp_5.status = STARTED # keyboard checking is just starting waitOnFlip = True win.callOnFlip(key_resp_5.clock.reset) # t=0 on next screen flip win.callOnFlip(key_resp_5.clearEvents, eventType='keyboard') # clear events on next screen flip if key_resp_5.status == STARTED and not waitOnFlip: theseKeys = key_resp_5.getKeys(keyList=['c'], waitRelease=False) _key_resp_5_allKeys.extend(theseKeys) if len(_key_resp_5_allKeys): key_resp_5.keys = _key_resp_5_allKeys[-1].name # just the last key pressed key_resp_5.rt = _key_resp_5_allKeys[-1].rt # a response ends the routine continueRoutine = False # check for quit (typically the Esc key) if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]): core.quit() # check if all components have 
finished if not continueRoutine: # a component has requested a forced-end of Routine break continueRoutine = False # will revert to True if at least one component still running for thisComponent in Instr_StaircaseComponents: if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: continueRoutine = True break # at least one component has not yet finished # refresh the screen if continueRoutine: # don't flip if this routine is over or we'll get a blank screen win.flip() # -------Ending Routine "Instr_Staircase"------- for thisComponent in Instr_StaircaseComponents: if hasattr(thisComponent, "setAutoDraw"): thisComponent.setAutoDraw(False) thisExp.addData('Instr_staircase_txt.started', Instr_staircase_txt.tStartRefresh) thisExp.addData('Instr_staircase_txt.stopped', Instr_staircase_txt.tStopRefresh) # the Routine "Instr_Staircase" was not non-slip safe, so reset the non-slip timer routineTimer.reset() # set up handler to look after randomisation of conditions etc hear_staircase_loop = data.TrialHandler(nReps=tot_sc_trials, method='sequential', extraInfo=expInfo, originPath=-1, trialList=[None], seed=None, name='hear_staircase_loop') thisExp.addLoop(hear_staircase_loop) # add the loop to the experiment thisHear_staircase_loop = hear_staircase_loop.trialList[0] # so we can initialise stimuli with some values # abbreviate parameter names if possible (e.g. rgb = thisHear_staircase_loop.rgb) if thisHear_staircase_loop != None: for paramName in thisHear_staircase_loop: exec('{} = thisHear_staircase_loop[paramName]'.format(paramName)) for thisHear_staircase_loop in hear_staircase_loop: currentLoop = hear_staircase_loop # abbreviate parameter names if possible (e.g. rgb = thisHear_staircase_loop.rgb) if thisHear_staircase_loop != None: for paramName in thisHear_staircase_loop: exec('{} = thisHear_staircase_loop[paramName]'.format(paramName)) # ------Prepare to start Routine "Instr_staircase"------- continueRoutine = True # update component parameters for each repeat stair_sound.setSound('sound_cue.wav', hamming=True) stair_sound.setVolume(curr_volume, log=False) hear_sc_resp.keys = [] hear_sc_resp.rt = [] _hear_sc_resp_allKeys = [] # keep track of which components have finished Instr_staircaseComponents = [hear_sc_fix, stair_sound, hear_sc_resp, hc_instr1, hc_instr2] for thisComponent in Instr_staircaseComponents: thisComponent.tStart = None thisComponent.tStop = None thisComponent.tStartRefresh = None thisComponent.tStopRefresh = None if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED # reset timers t = 0 _timeToFirstFrame = win.getFutureFlipTime(clock="now") Instr_staircaseClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip frameN = -1 # -------Run Routine "Instr_staircase"------- while continueRoutine: # get current time t = Instr_staircaseClock.getTime() tThisFlip = win.getFutureFlipTime(clock=Instr_staircaseClock) tThisFlipGlobal = win.getFutureFlipTime(clock=None) frameN = frameN + 1 # number of completed frames (so 0 is the first frame) # update/draw components on each frame # *hear_sc_fix* updates if hear_sc_fix.status == NOT_STARTED and tThisFlip >= 0.2-frameTolerance: # keep track of start time/frame for later hear_sc_fix.frameNStart = frameN # exact frame index hear_sc_fix.tStart = t # local t and not account for scr refresh hear_sc_fix.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(hear_sc_fix, 'tStartRefresh') # time at next scr refresh hear_sc_fix.setAutoDraw(True) # start/stop stair_sound if stair_sound.status == 
NOT_STARTED and tThisFlip >= 0.2-frameTolerance: # keep track of start time/frame for later stair_sound.frameNStart = frameN # exact frame index stair_sound.tStart = t # local t and not account for scr refresh stair_sound.tStartRefresh = tThisFlipGlobal # on global time stair_sound.play(when=win) # sync with win flip # *hear_sc_resp* updates waitOnFlip = False if hear_sc_resp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance: # keep track of start time/frame for later hear_sc_resp.frameNStart = frameN # exact frame index hear_sc_resp.tStart = t # local t and not account for scr refresh hear_sc_resp.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(hear_sc_resp, 'tStartRefresh') # time at next scr refresh hear_sc_resp.status = STARTED # keyboard checking is just starting waitOnFlip = True win.callOnFlip(hear_sc_resp.clock.reset) # t=0 on next screen flip win.callOnFlip(hear_sc_resp.clearEvents, eventType='keyboard') # clear events on next screen flip if hear_sc_resp.status == STARTED and not waitOnFlip: theseKeys = hear_sc_resp.getKeys(keyList=['up', 'down'], waitRelease=False) _hear_sc_resp_allKeys.extend(theseKeys) if len(_hear_sc_resp_allKeys): hear_sc_resp.keys = _hear_sc_resp_allKeys[0].name # just the first key pressed hear_sc_resp.rt = _hear_sc_resp_allKeys[0].rt # a response ends the routine continueRoutine = False # *hc_instr1* updates if hc_instr1.status == NOT_STARTED and tThisFlip >= 0-frameTolerance: # keep track of start time/frame for later hc_instr1.frameNStart = frameN # exact frame index hc_instr1.tStart = t # local t and not account for scr refresh hc_instr1.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(hc_instr1, 'tStartRefresh') # time at next scr refresh hc_instr1.setAutoDraw(True) # *hc_instr2* updates if hc_instr2.status == NOT_STARTED and tThisFlip >= 0-frameTolerance: # keep track of start time/frame for later hc_instr2.frameNStart = frameN # exact frame index hc_instr2.tStart = t # local t and not account for scr refresh hc_instr2.tStartRefresh = tThisFlipGlobal # on global time win.timeOnFlip(hc_instr2, 'tStartRefresh') # time at next scr refresh hc_instr2.setAutoDraw(True) # check for quit (typically the Esc key) if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]): core.quit() # check if all components have finished if not continueRoutine: # a component has requested a forced-end of Routine break continueRoutine = False # will revert to True if at least one component still running for thisComponent in Instr_staircaseComponents: if hasattr(thisComponent, "status") and thisComponent.status != FINISHED: continueRoutine = True break # at least one component has not yet finished # refresh the screen if continueRoutine: # don't flip if this routine is over or we'll get a blank screen win.flip() # -------Ending Routine "Instr_staircase"------- for thisComponent in Instr_staircaseComponents: if hasattr(thisComponent, "setAutoDraw"): thisComponent.setAutoDraw(False) hear_staircase_loop.addData('hear_sc_fix.started', hear_sc_fix.tStartRefresh) hear_staircase_loop.addData('hear_sc_fix.stopped', hear_sc_fix.tStopRefresh) stair_sound.stop() # ensure sound has stopped at end of routine hear_staircase_loop.addData('stair_sound.started', stair_sound.tStartRefresh) hear_staircase_loop.addData('stair_sound.stopped', stair_sound.tStopRefresh) # check responses if hear_sc_resp.keys in ['', [], None]: # No response was made hear_sc_resp.keys = None hear_staircase_loop.addData('hear_sc_resp.keys',hear_sc_resp.keys) if hear_sc_resp.keys 
!= None: # we had a response hear_staircase_loop.addData('hear_sc_resp.rt', hear_sc_resp.rt) hear_staircase_loop.addData('hear_sc_resp.started', hear_sc_resp.tStartRefresh) hear_staircase_loop.addData('hear_sc_resp.stopped', hear_sc_resp.tStopRefresh) curr_response = hear_sc_resp.keys vol_ind=hear_sc_ntrial-1 volumes_array[vol_ind]=curr_volume if curr_response=='up': curr_volume=curr_volume+curr_volume*0.25 if curr_response=='down': curr_volume=curr_volume-curr_volume*0.25 if (curr_response!=last_response) and hear_sc_ntrial!=1: curr_volume=ini_volume last_response = curr_response hear_sc_ntrial=hear_sc_ntrial+1 hear_staircase_loop.addData('hc_instr1.started', hc_instr1.tStartRefresh) hear_staircase_loop.addData('hc_instr1.stopped', hc_instr1.tStopRefresh) hear_staircase_loop.addData('hc_instr2.started', hc_instr2.tStartRefresh) hear_staircase_loop.addData('hc_instr2.stopped', hc_instr2.tStopRefresh) # the Routine "Instr_staircase" was not non-slip safe, so reset the non-slip timer routineTimer.reset() thisExp.nextEntry() # completed tot_sc_trials repeats of 'hear_staircase_loop' # ------Prepare to start Routine "set_sound_volume"------- continueRoutine =
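For readability, the volume-update rule buried in the staircase loop above can be pulled out into a standalone function. This is a sketch only; the names mirror the experiment script, and the reset-to-initial-volume behaviour on a response reversal is reproduced as-is:

def update_volume(curr_volume, curr_response, last_response, ini_volume, trial_number):
    """One step of the hearing staircase: 'up' raises the volume by 25%,
    'down' lowers it by 25%, and a response reversal (after trial 1)
    resets the volume to its initial value."""
    if curr_response == 'up':
        curr_volume += curr_volume * 0.25
    if curr_response == 'down':
        curr_volume -= curr_volume * 0.25
    if curr_response != last_response and trial_number != 1:
        curr_volume = ini_volume
    return curr_volume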
ndim(x) == 5: if data_format == 'channels_first': if ndim(bias) == 1: x += reshape(bias, (1, bias_shape[0], 1, 1, 1)) else: x += reshape(bias, (1, bias_shape[3]) + bias_shape[:3]) elif data_format == 'channels_last': if ndim(bias) == 1: x += reshape(bias, (1, 1, 1, 1, bias_shape[0])) else: x += reshape(bias, (1,) + bias_shape) elif ndim(x) == 4: if data_format == 'channels_first': if ndim(bias) == 1: x += reshape(bias, (1, bias_shape[0], 1, 1)) else: x += reshape(bias, (1, bias_shape[2]) + bias_shape[:2]) elif data_format == 'channels_last': if ndim(bias) == 1: x += reshape(bias, (1, 1, 1, bias_shape[0])) else: x += reshape(bias, (1,) + bias_shape) elif ndim(x) == 3: if data_format == 'channels_first': if ndim(bias) == 1: x += reshape(bias, (1, bias_shape[0], 1)) else: x += reshape(bias, (1, bias_shape[1], bias_shape[0])) elif data_format == 'channels_last': if ndim(bias) == 1: x += reshape(bias, (1, 1, bias_shape[0])) else: x += reshape(bias, (1,) + bias_shape) else: x += bias return x # RANDOMNESS def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): """Returns a tensor with normal distribution of values. # Arguments shape: A tuple of integers, the shape of tensor to create. mean: A float, mean of the normal distribution to draw samples. stddev: A float, standard deviation of the normal distribution to draw samples. dtype: String, dtype of returned tensor. seed: Integer, random seed. # Returns A tensor. """ if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(1, 10e6) rng = RandomStreams(seed=seed) return rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype) def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): """Returns a tensor with uniform distribution of values. # Arguments shape: A tuple of integers, the shape of tensor to create. minval: A float, lower boundary of the uniform distribution to draw samples. maxval: A float, upper boundary of the uniform distribution to draw samples. dtype: String, dtype of returned tensor. seed: Integer, random seed. # Returns A tensor. """ if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(1, 10e6) rng = RandomStreams(seed=seed) return rng.uniform(shape, low=minval, high=maxval, dtype=dtype) def random_binomial(shape, p=0.0, dtype=None, seed=None): """Returns a tensor with random binomial distribution of values. # Arguments shape: A tuple of integers, the shape of tensor to create. p: A float, `0. <= p <= 1`, probability of binomial distribution. dtype: String, dtype of returned tensor. seed: Integer, random seed. # Returns A tensor. """ if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(1, 10e6) rng = RandomStreams(seed=seed) return rng.binomial(shape, p=p, dtype=dtype) def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): """Returns a tensor with truncated random normal distribution of values. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than two standard deviations from the mean are dropped and re-picked. # Arguments shape: A tuple of integers, the shape of tensor to create. mean: Mean of the values. stddev: Standard deviation of the values. dtype: String, dtype of returned tensor. seed: Integer, random seed. # Returns A tensor. 
""" if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(1, 10e6) rng = RandomStreams(seed=seed) try: return rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype, truncate=True) except TypeError: normal_t = rng.normal(size=shape, avg=mean, std=stddev, dtype=dtype) # Poor man's truncated normal: we literally clip the tensor return T.clip(normal_t, mean - 2 * stddev, mean + 2 * stddev) def random_multinomial(shape, p=0.0, dtype=None, seed=None): """Returns a tensor with random multinomial distribution of values. # Arguments shape: A tuple of integers, the shape of tensor to create. p: A float, `0. <= p <= 1`, probability of multinomial distribution. dtype: String, dtype of returned tensor. seed: Integer, random seed. # Returns A tensor. """ if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(10e6) rng = RandomStreams(seed=seed) return rng.multinomial(shape, pvals=p, dtype=dtype) # COUNT SKETCH def count_sketch(h, s, x, d=16000): """Count sketch operator. See https://arxiv.org/abs/1606.01847. # Arguments h: Count sketch vector h \in \{1, d\} ^n s: Count sketch vector s \in \{-1, 1\} ^n x: Count sketch input vector d: Compact Bilinear dimension """ rval, updates = theano.scan(fn=__count_sketch, sequences=[h, s, x.dimshuffle(1, 0)], outputs_info=T.alloc(0., x.shape[0], d), non_sequences=[], n_steps=x.shape[1]) return rval[-1] # We are interested only in the last value def __count_sketch(h, s, v, # Sequences y, # Outputs info ): """Count sketch utility. See https://arxiv.org/abs/1606.01847. # Arguments h: Count sketch vector h \in \{1, d\} ^n s: Count sketch vector s \in \{-1, 1\} ^n v: Count sketch input vector y: Projected output vector """ return T.cast(T.inc_subtensor(y[:, h], T.dot(s, v)), 'float32') # 1d Convolution def scan_conv1d(u, v): """1D convolution over a set of vectors. All inputs will be treated by pairs. 
#x must be equal to #kernel # Arguments u: first set of vectors v: second set of vectors """ def __vec_conv(u, v, # Sequences w, # Outputs info ): u = u.dimshuffle(('x', 0)) v = v.dimshuffle(('x', 0)) conv_out = vec_conv(u, v, border_mode='full') init_cut = u.shape[1] / 2 end_cut = init_cut + u.shape[1] return conv_out[0, init_cut:end_cut] conv_out, updates = theano.scan(__vec_conv, sequences=[u, v], outputs_info=T.alloc(0., u.shape[1]), # , d), non_sequences=[], n_steps=u.shape[0]) return conv_out # Theano implementation of CTC # Used with permission from <NAME> # https://github.com/shawntan/ # Note that TensorFlow's native CTC code is significantly # faster than this def ctc_interleave_blanks(Y): Y_ = T.alloc(-1, Y.shape[0] * 2 + 1) Y_ = T.set_subtensor(Y_[T.arange(Y.shape[0]) * 2 + 1], Y) return Y_ def ctc_create_skip_idxs(Y): skip_idxs = T.arange((Y.shape[0] - 3) // 2) * 2 + 1 non_repeats = T.neq(Y[skip_idxs], Y[skip_idxs + 2]) return skip_idxs[non_repeats.nonzero()] def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev): active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()] active_next = T.cast(T.minimum( T.maximum( active + 1, T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1 ), log_p_curr.shape[0]), 'int32') common_factor = T.max(log_p_prev[:active]) p_prev = T.exp(log_p_prev[:active] - common_factor) _p_prev = zeros[:active_next] # copy over _p_prev = T.set_subtensor(_p_prev[:active], p_prev) # previous transitions _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1]) # skip transitions _p_prev = T.inc_subtensor( _p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs]) updated_log_p_prev = T.log(_p_prev) + common_factor log_p_next = T.set_subtensor( zeros[:active_next], log_p_curr[:active_next] + updated_log_p_prev ) return active_next, log_p_next def ctc_path_probs(predict, Y, alpha=1e-4): smoothed = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0] L = T.log(smoothed) zeros = T.zeros_like(L[0]) log_first = zeros f_skip_idxs = ctc_create_skip_idxs(Y) # there should be a shortcut to calculating this b_skip_idxs = ctc_create_skip_idxs(Y[::-1]) def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev): f_active_next, log_f_next = ctc_update_log_p( f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev) b_active_next, log_b_next = ctc_update_log_p( b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev) return f_active_next, log_f_next, b_active_next, log_b_next [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan( step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first]) idxs = T.arange(L.shape[1]).dimshuffle('x', 0) mask = ((idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]) log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L return log_probs, mask def ctc_cost(predict, Y): log_probs, mask = ctc_path_probs(predict, ctc_interleave_blanks(Y)) common_factor = T.max(log_probs) total_log_prob = T.log(T.sum(T.exp(log_probs - common_factor)[mask.nonzero()])) total_log_prob = total_log_prob + common_factor return -total_log_prob # batchifies original CTC code def ctc_batch_cost(y_true, y_pred, input_length, label_length): """Runs CTC loss algorithm on each batch element. 
# Arguments y_true: tensor (samples, max_string_length) containing the truth labels y_pred: tensor (samples, time_steps, num_categories) containing the prediction, or output of the softmax input_length: tensor (samples,1) containing the sequence length for each batch item in y_pred label_length: tensor (samples,1) containing the sequence length for each batch item in y_true # Returns Tensor with shape (samples,1) containing the CTC loss of each element. """ def ctc_step(y_true_step, y_pred_step, input_length_step, label_length_step): y_pred_step = y_pred_step[0: input_length_step[0]] y_true_step = y_true_step[0:label_length_step[0]] return ctc_cost(y_pred_step, y_true_step) ret, _ = theano.scan( fn=ctc_step, outputs_info=None, sequences=[y_true, y_pred, input_length, label_length] ) ret = ret.dimshuffle('x', 0) return ret # HIGH ORDER FUNCTIONS def map_fn(fn, elems, name=None, dtype=None): """Map the function fn over the elements elems and return the outputs. # Arguments fn: Callable that will be called upon each element in elems elems: tensor, at least 2 dimensional name: A string name for the map node in the graph # Returns Tensor with first dimension equal to the elems and second depending on fn """ return theano.map(fn, elems, name=name)[0] def foldl(fn, elems, initializer=None, name=None): """Reduce elems using fn
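The Theano scan in count_sketch above is easier to follow in plain NumPy. Below is a minimal non-Theano sketch of the same projection for a single vector (count_sketch_np is a hypothetical helper for illustration, not part of the backend; the batched Theano version accumulates the same scatter-add across the batch dimension):

import numpy as np

def count_sketch_np(h, s, x, d=16000):
    """Project x (length n) down to length d: y[h[i]] += s[i] * x[i].
    h holds bucket indices in [0, d), s holds signs in {-1, +1}."""
    y = np.zeros(d, dtype=x.dtype)
    np.add.at(y, h, s * x)  # unbuffered scatter-add; handles repeated indices
    return y

# Example: sketch a length-8 vector into d=16 buckets
rng = np.random.default_rng(0)
x = rng.normal(size=8).astype('float32')
h = rng.integers(0, 16, size=8)
s = rng.choice([-1.0, 1.0], size=8).astype('float32')
print(count_sketch_np(h, s, x, d=16))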
# probables/blooms/bloom.py """ BloomFilter and BloomFilter on Disk, python implementation License: MIT Author: <NAME> (<EMAIL>) URL: https://github.com/barrust/bloom """ import math import os from array import array from binascii import hexlify, unhexlify from io import BytesIO, IOBase from mmap import mmap from numbers import Number from pathlib import Path from shutil import copyfile from struct import Struct from textwrap import wrap from typing import ByteString, Tuple, Union from ..exceptions import InitializationError, NotSupportedError from ..hashes import HashFuncT, HashResultsT, KeyT, default_fnv_1a from ..utilities import MMap, is_hex_string, is_valid_file MISMATCH_MSG = "The parameter second must be of type BloomFilter or a BloomFilterOnDisk" SimpleBloomT = Union["BloomFilter", "BloomFilterOnDisk"] def _verify_not_type_mismatch(second: SimpleBloomT) -> bool: """verify that there is not a type mismatch""" return isinstance(second, (BloomFilter, BloomFilterOnDisk)) class BloomFilter: """Simple Bloom Filter implementation for use in python; It can read and write the same format as the c version (https://github.com/barrust/bloom) Args: est_elements (int): The number of estimated elements to be added false_positive_rate (float): The desired false positive rate filepath (str): Path to file to load hex_string (str): Hex based representation to be loaded hash_function (function): Hashing strategy function to use `hf(key, number)` Returns: BloomFilter: A Bloom Filter object Note: Initialization order of operations: 1) From file 2) From Hex String 3) From params """ __slots__ = [ "_on_disk", "_type", "_typecode", "_bits_per_elm", "_bloom", "_est_elements", "_fpr", "_bloom_length", "_hash_func", "_els_added", "_number_hashes", "_num_bits", ] def __init__( self, est_elements: Union[int, None] = None, false_positive_rate: Union[float, None] = None, filepath: Union[str, Path, None] = None, hex_string: Union[str, None] = None, hash_function: Union[HashFuncT, None] = None, ): # set some things up self._on_disk = False self._type = "regular" self._typecode = "B" self._bits_per_elm = 8.0 if is_valid_file(filepath): self._load(filepath, hash_function) elif is_hex_string(hex_string): self._load_hex(hex_string, hash_function) else: if est_elements is None or false_positive_rate is None: raise InitializationError("Insufficient parameters to set up the Bloom Filter") # calc values fpr, n_hashes, n_bits = self._get_optimized_params(est_elements, false_positive_rate) self._set_values(est_elements, fpr, n_hashes, n_bits, hash_function) self._bloom = array(self._typecode, [0]) * self._bloom_length # NOTE: these should be "FOOTERS" and not headers _FOOTER_STRUCT = Struct("QQf") _FOOTER_STRUCT_BE = Struct(">QQf") _FPR_STRUCT = Struct("f") _IMPT_STRUCT = Struct("B") def __contains__(self, key: KeyT) -> Union[int, bool]: """setup the `in` keyword""" return self.check(key) def __str__(self) -> str: """output statistics of the bloom filter""" on_disk = "no" if self.is_on_disk is False else "yes" stats = ( "BloomFilter:\n" "\tbits: {0}\n" "\testimated elements: {1}\n" "\tnumber hashes: {2}\n" "\tmax false positive rate: {3:.6f}\n" "\tbloom length (8 bits): {4}\n" "\telements added: {5}\n" "\testimated elements added: {6}\n" "\tcurrent false positive rate: {7:.6f}\n" "\texport size (bytes): {8}\n" "\tnumber bits set: {9}\n" "\tis on disk: {10}\n" ) return stats.format( self.number_bits, self.estimated_elements, self.number_hashes, self.false_positive_rate, self.bloom_length, self.elements_added,
self.estimate_elements(), self.current_false_positive_rate(), self.export_size(), self._cnt_number_bits_set(), on_disk, ) def __bytes__(self) -> bytes: """Export bloom filter to `bytes`""" with BytesIO() as f: self.export(f) return f.getvalue() # Some Properties @property def false_positive_rate(self) -> float: """float: The maximum desired false positive rate Note: Not settable""" return self._fpr @property def estimated_elements(self) -> int: """int: The maximum number of elements estimated to be added at setup Note: Not settable""" return self._est_elements @property def number_hashes(self) -> int: """int: The number of hashes required for the Bloom Filter hashing strategy Note: Not settable""" return self._number_hashes @property def number_bits(self) -> int: """int: Number of bits in the Bloom Filter Note: Not settable""" return self._num_bits @property def elements_added(self) -> int: """int: Number of elements added to the Bloom Filter Note: Changing this can cause the current false positive rate to be reported incorrectly""" return self._els_added @elements_added.setter def elements_added(self, val: int): """set the els added""" self._els_added = val @property def is_on_disk(self) -> bool: """bool: Is the Bloom Filter on Disk or not Note: Not settable""" return self._on_disk @property def bloom_length(self) -> int: """int: Length of the Bloom Filter array Note: Not settable""" return self._bloom_length @property def bloom(self) -> array: """list(int): The bit/int array""" return self._bloom @property def hash_function(self) -> HashFuncT: """function: The hash function used Note: Not settable""" return self._hash_func # Working things def clear(self) -> None: """Clear or reset the Bloom Filter""" self._els_added = 0 for idx in range(self._bloom_length): self._bloom[idx] = 0 def hashes(self, key: KeyT, depth: Union[int, None] = None) -> HashResultsT: """Return the hashes based on the provided key Args: key (str): The element to be hashed depth (int): Number of permutations of the hash to generate; if None, generate `number_hashes` Returns: List(int): A list of the hashes for the key in int form""" tmp = depth if depth is not None else self._number_hashes return self._hash_func(key, tmp) def add(self, key: KeyT) -> None: """Add the key to the Bloom Filter Args: key (str): The element to be inserted""" self.add_alt(self.hashes(key)) def add_alt(self, hashes: HashResultsT) -> None: """Add the element represented by hashes into the Bloom Filter Args: hashes (list): A list of integers representing the key to insert""" for i in range(0, self._number_hashes): k = hashes[i] % self._num_bits idx = k // 8 self._bloom[idx] = self._bloom[idx] | (1 << (k % 8)) self._els_added += 1 def check(self, key: KeyT) -> bool: """Check if the key is likely in the Bloom Filter Args: key (str): The element to be checked Returns: bool: True if likely encountered, False if definitely not""" return self.check_alt(self.hashes(key)) def check_alt(self, hashes: HashResultsT) -> bool: """Check if the element represented by hashes is in the Bloom Filter Args: hashes (list): A list of integers representing the key to check Returns: bool: True if likely encountered, False if definitely not""" for i in range(self._number_hashes): k = hashes[i] % self._num_bits if (self._bloom[k // 8] & (1 << (k % 8))) == 0: return False return True def export_hex(self) -> str: """Export the Bloom Filter as a hex string Return: str: Hex representation of the Bloom Filter""" footer_bytes = self._FOOTER_STRUCT_BE.pack(
self.estimated_elements, self.elements_added, self.false_positive_rate, ) bytes_string = hexlify(bytearray(self._bloom[: self.bloom_length])) + hexlify(footer_bytes) return str(bytes_string, "utf-8") def export(self, file: Union[Path, str, IOBase, mmap]) -> None: """Export the Bloom Filter to disk Args: filename (str): The filename to which the Bloom Filter will be written.""" if not isinstance(file, (IOBase, mmap)): with open(file, "wb") as filepointer: self.export(filepointer) # type: ignore else: self._bloom.tofile(file) # type: ignore file.write( self._FOOTER_STRUCT.pack( self.estimated_elements, self.elements_added, self.false_positive_rate, ) ) def export_c_header(self, filename: Union[str, Path]) -> None: """Export the Bloom Filter to disk as a C header file. Args: filename (str): The filename to which the Bloom Filter will be written.""" data = ( " " + line for line in wrap(", ".join(("0x{:02x}".format(e) for e in bytearray.fromhex(self.export_hex()))), 80) ) if self._type in ["regular", "regular-on-disk"]: bloom_type = "standard BloomFilter" else: bloom_type = "CountingBloomFilter" with open(filename, "w") as file: print("/* BloomFilter Export of a {} */".format(bloom_type), file=file) print("#include <inttypes.h>", file=file) print("const uint64_t estimated_elements = ", self.estimated_elements, ";", sep="", file=file) print("const uint64_t elements_added = ", self.elements_added, ";", sep="", file=file) print("const float false_positive_rate = ", self.false_positive_rate, ";", sep="", file=file) print("const uint64_t number_bits = ", self.number_bits, ";", sep="", file=file) print("const unsigned int number_hashes = ", self.number_hashes, ";", sep="", file=file) print("const unsigned char bloom[] = {", *data, "};", sep="\n", file=file) @classmethod def frombytes(cls, b: ByteString, hash_function: Union[HashFuncT, None] = None) -> "BloomFilter": """ Args: b (ByteString): The bytes to load as a Bloom Filter hash_function (function): Hashing strategy function to use `hf(key, number)` Returns: BloomFilter: A Bloom Filter object """ offset = cls._FOOTER_STRUCT.size est_els, els_added, fpr, _, _ = cls._parse_footer(cls._FOOTER_STRUCT, bytes(b[-offset:])) blm = BloomFilter(est_elements=est_els, false_positive_rate=fpr, hash_function=hash_function) blm._load(b, hash_function=blm.hash_function) blm._els_added = els_added return blm def estimate_elements(self) -> int: """Estimate the number of unique elements added Returns: int: Number of elements estimated to be inserted Note: Returns -1 if all bits in the Bloom filter are set""" setbits = self._cnt_number_bits_set() if setbits >= self.number_bits: return -1 # not sure this is the "best", but it would signal something is wrong log_n = math.log(1 - (float(setbits) / float(self.number_bits))) tmp = float(self.number_bits) / float(self.number_hashes) return int(-1 * tmp * log_n) def export_size(self) -> int: """Calculate the size of the bloom on disk Returns: int: Size of the Bloom Filter when exported to disk""" return (self.bloom_length * self._IMPT_STRUCT.size) + self._FOOTER_STRUCT.size def current_false_positive_rate(self) -> float: """Calculate the current false positive rate based on elements added Return: float:
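Basic round-trip usage of the BloomFilter class above (a short sketch consistent with the constructor, add/check, and export_hex shown in this excerpt; the top-level import path assumes the package layout named in the file header):

from probables import BloomFilter

blm = BloomFilter(est_elements=1000, false_positive_rate=0.05)
blm.add("google.com")
print("google.com" in blm)      # True, via __contains__ -> check()
print("facebook.com" in blm)    # False, barring a false positive
print(blm.estimate_elements())  # roughly 1 for a single distinct key

# hex export/import round-trip
blm2 = BloomFilter(hex_string=blm.export_hex())
print("google.com" in blm2)     # True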
from __future__ import print_function, absolute_import, division import KratosMultiphysics import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication import KratosMultiphysics.KratosUnittest as KratosUnittest from math import sqrt, sin, cos, pi, exp, atan class BasePatchTestCrBeam3D2N(KratosUnittest.TestCase): def setUp(self): pass def _add_dofs(self,mp): # Adding dofs AND their corresponding reactions KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DISPLACEMENT_X, KratosMultiphysics.REACTION_X,mp) KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DISPLACEMENT_Y, KratosMultiphysics.REACTION_Y,mp) KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DISPLACEMENT_Z, KratosMultiphysics.REACTION_Z,mp) KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ROTATION_X, KratosMultiphysics.REACTION_MOMENT_X,mp) KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ROTATION_Y, KratosMultiphysics.REACTION_MOMENT_Y,mp) KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.ROTATION_Z, KratosMultiphysics.REACTION_MOMENT_Z,mp) def _add_variables(self,mp): mp.AddNodalSolutionStepVariable(KratosMultiphysics.DISPLACEMENT) mp.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION) mp.AddNodalSolutionStepVariable(KratosMultiphysics.ROTATION) mp.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION_MOMENT) mp.AddNodalSolutionStepVariable(StructuralMechanicsApplication.POINT_LOAD) mp.AddNodalSolutionStepVariable(StructuralMechanicsApplication.POINT_MOMENT) mp.AddNodalSolutionStepVariable(KratosMultiphysics.VOLUME_ACCELERATION) mp.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY) mp.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_VELOCITY) mp.AddNodalSolutionStepVariable(KratosMultiphysics.ACCELERATION) mp.AddNodalSolutionStepVariable(KratosMultiphysics.ANGULAR_ACCELERATION) def _apply_material_properties(self,mp,dim): #define properties mp.GetProperties()[0].SetValue(KratosMultiphysics.YOUNG_MODULUS,210e9) mp.GetProperties()[0].SetValue(KratosMultiphysics.DENSITY,7850) mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.CROSS_AREA,0.01) mp.GetProperties()[0].SetValue(KratosMultiphysics.POISSON_RATIO,0.30) mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.TORSIONAL_INERTIA,0.00001) mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.I22,0.00001) mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.I33,0.00001) g = [0,0,0] mp.GetProperties()[0].SetValue(KratosMultiphysics.VOLUME_ACCELERATION,g) cl = StructuralMechanicsApplication.LinearElastic3DLaw() mp.GetProperties()[0].SetValue(KratosMultiphysics.CONSTITUTIVE_LAW,cl) def _apply_elemental_data(self,element): # Adding LOCAL_AXIS_2 element.SetValue(KratosMultiphysics.LOCAL_AXIS_2,[0,1,0]) def _apply_3D_moment_hinge_z(self,element): # Adding LOCAL_AXIS_2 element.SetValue(StructuralMechanicsApplication.CONDENSED_DOF_LIST,[11]) def _apply_BCs(self,mp,which_dof): if (which_dof == 'xyz'): KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.DISPLACEMENT_X, True, mp.Nodes) KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.DISPLACEMENT_Y, True, mp.Nodes) KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.DISPLACEMENT_Z, True, mp.Nodes) if (which_dof == 'xz'): KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.DISPLACEMENT_X, True, mp.Nodes) KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.DISPLACEMENT_Z, True, mp.Nodes) if (which_dof == 'yz'): 
KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.DISPLACEMENT_Y, True, mp.Nodes) KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.DISPLACEMENT_Z, True, mp.Nodes) if (which_dof == 'rotXYZ'): KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.ROTATION_X, True, mp.Nodes) KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.ROTATION_Y, True, mp.Nodes) KratosMultiphysics.VariableUtils().ApplyFixity(KratosMultiphysics.ROTATION_Z, True, mp.Nodes) def _apply_Neumann_BCs(self,mp,which_dof,load_size_dir): if(which_dof == 'y'): KratosMultiphysics.VariableUtils().SetScalarVar(StructuralMechanicsApplication. POINT_LOAD_Y, load_size_dir, mp.Nodes) # for node in mp.Nodes: # node.SetSolutionStepValue(StructuralMechanicsApplication. # POINT_LOAD_Y,0,load_size_dir) if(which_dof == 'x'): KratosMultiphysics.VariableUtils().SetScalarVar(StructuralMechanicsApplication. POINT_LOAD_X, load_size_dir, mp.Nodes) # for node in mp.Nodes: # node.SetSolutionStepValue(StructuralMechanicsApplication. # POINT_LOAD_X,0,load_size_dir) def _solve_linear(self,mp): linear_solver = KratosMultiphysics.SkylineLUFactorizationSolver() builder_and_solver = KratosMultiphysics.ResidualBasedBlockBuilderAndSolver(linear_solver) scheme = KratosMultiphysics.ResidualBasedIncrementalUpdateStaticScheme() compute_reactions = True #Now the rotation reactions (REACTION_MOMENT) is added, so it works reform_step_dofs = True calculate_norm_dx = False move_mesh_flag = True strategy = KratosMultiphysics.ResidualBasedLinearStrategy(mp, scheme, linear_solver, builder_and_solver, compute_reactions, reform_step_dofs, calculate_norm_dx, move_mesh_flag) strategy.SetEchoLevel(0) strategy.Check() strategy.Solve() def _solve_nonlinear(self,mp): linear_solver = KratosMultiphysics.SkylineLUFactorizationSolver() builder_and_solver = KratosMultiphysics.ResidualBasedBlockBuilderAndSolver(linear_solver) scheme = KratosMultiphysics.ResidualBasedIncrementalUpdateStaticScheme() convergence_criterion = StructuralMechanicsApplication.DisplacementAndOtherDoFCriteria(1e-15,1e-15) convergence_criterion.SetEchoLevel(0) max_iters = 1000 compute_reactions = True reform_step_dofs = True move_mesh_flag = True strategy = KratosMultiphysics.ResidualBasedNewtonRaphsonStrategy(mp, scheme, linear_solver, convergence_criterion, builder_and_solver, max_iters, compute_reactions, reform_step_dofs, move_mesh_flag) strategy.SetEchoLevel(0) strategy.Check() strategy.Solve() def _solve_dynamic(self,mp): #define a minimal newton raphson solver linear_solver = KratosMultiphysics.SkylineLUFactorizationSolver() builder_and_solver = KratosMultiphysics.ResidualBasedBlockBuilderAndSolver(linear_solver) scheme = KratosMultiphysics.ResidualBasedBossakDisplacementScheme(0.00) convergence_criterion = KratosMultiphysics.ResidualCriteria(1e-8,1e-8) convergence_criterion.SetEchoLevel(0) max_iters = 1000 compute_reactions = True reform_step_dofs = True move_mesh_flag = True strategy = KratosMultiphysics.ResidualBasedNewtonRaphsonStrategy(mp, scheme, linear_solver, convergence_criterion, builder_and_solver, max_iters, compute_reactions, reform_step_dofs, move_mesh_flag) strategy.SetEchoLevel(0) strategy.Check() strategy.Solve() def _check_results_linear(self,mp,endNode): #check displacement result displacement_cantilever_tip = mp.Nodes[endNode].GetSolutionStepValue( KratosMultiphysics.DISPLACEMENT) disp_y_analytical = -400000.00*(1.2**3)/(3*210e9*0.00001) self.assertAlmostEqual(0.00, displacement_cantilever_tip[0],6) 
self.assertAlmostEqual(disp_y_analytical, displacement_cantilever_tip[1],6) self.assertAlmostEqual(0.00, displacement_cantilever_tip[2],6) def _check_results_nonlinear(self,mp,timestep,Moment_i,endNode): ##node at cantilever tip node_temp = mp.Nodes[endNode] displacement_x = node_temp.GetSolutionStepValue( KratosMultiphysics.DISPLACEMENT_X) displacement_y = node_temp.GetSolutionStepValue( KratosMultiphysics.DISPLACEMENT_Y) moment_z = node_temp.GetSolutionStepValue(StructuralMechanicsApplication. POINT_MOMENT_Z) #check moment z self.assertAlmostEqual(moment_z, Moment_i) #check displacement as soon as a total circle is formed #M = EI * 2 * pi / L ---> 13200000.0 reached at t_step = 527 if (timestep == 5): self.assertAlmostEqual(displacement_x, -0.0008495924536316574) self.assertAlmostEqual(displacement_y, 0.03569912736309147) def _check_results_dynamic(self,mp,time_i,nr_nodes,time_step): #check free vibration of cantilever tip disp_y_simulated = mp.Nodes[nr_nodes].GetSolutionStepValue( KratosMultiphysics.DISPLACEMENT_Y) disp_y_analytical = [-4.4017262561983686e-05,-0.00018621779467051006, -0.00040709297076666834,-0.0006775988708011861,-0.0009923249270175282] self.assertAlmostEqual(disp_y_analytical[time_step], disp_y_simulated) def _check_results_dynamic_lumped(self,mp,time_i,nr_nodes,time_step): #check free vibration of cantilever tip disp_y_simulated = mp.Nodes[nr_nodes].GetSolutionStepValue( KratosMultiphysics.DISPLACEMENT_Y) disp_y_analytical = [-4.162518390580818e-05,-0.00017969144438005632, -0.00039846788371390653,-0.0006674048593190372,-0.000980511641724115] self.assertAlmostEqual(disp_y_analytical[time_step], disp_y_simulated) def _check_results_dynamic_explicit(self,mp,time_i,nr_nodes,time_step): #check free vibration of cantilever tip disp_y_simulated = mp.Nodes[nr_nodes].GetSolutionStepValue( KratosMultiphysics.DISPLACEMENT_Y) disp_y_analytical = [-2.8662420382165618e-06, -7.324435068359769e-06, -1.1284349973901058e-05, -1.832745727658638e-05, -2.7392682029790193e-05, -3.725949945576998e-05, -5.039573129445816e-05, -6.550420107648481e-05, -8.13072032132872e-05, -9.994712970365864e-05, -0.00012023689482204995, -0.00014100714864344788] self.assertAlmostEqual(disp_y_analytical[time_step], disp_y_simulated,6) def _set_and_fill_buffer(self,mp,buffer_size,delta_time): # Set buffer size mp.SetBufferSize(buffer_size) # Fill buffer time = mp.ProcessInfo[KratosMultiphysics.TIME] time = time - delta_time * (buffer_size) mp.ProcessInfo.SetValue(KratosMultiphysics.TIME, time) for size in range(0, buffer_size): step = size - (buffer_size -1) mp.ProcessInfo.SetValue(KratosMultiphysics.STEP, step) time = time + delta_time #delta_time is computed from previous time in process_info mp.CloneTimeStep(time) mp.ProcessInfo[KratosMultiphysics.IS_RESTARTED] = False class DynamicPatchTestBeam3D2N(BasePatchTestCrBeam3D2N): def test_cr_beam_dynamic_lumped_mass_matrix(self): dim = 3 nr_nodes = 11 nr_elements = nr_nodes-1 current_model = KratosMultiphysics.Model() mp = current_model.CreateModelPart("solid_part") mp.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dim) self._add_variables(mp) self._apply_material_properties(mp,dim) mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.USE_CONSISTENT_MASS_MATRIX,False) #create nodes dx = 1.00 / nr_elements for i in range(nr_nodes): mp.CreateNewNode(i+1,i*dx,0.00,0.00) #add dofs self._add_dofs(mp) #create condition mp.CreateNewCondition("PointLoadCondition3D1N",1,[nr_nodes],mp.GetProperties()[0]) #create submodelparts for dirichlet boundary 
conditions bcs_xyz = mp.CreateSubModelPart("Dirichlet_XYZ") bcs_xyz.AddNodes([1]) bcs_rot = mp.CreateSubModelPart("Dirichlet_RotAll") bcs_rot.AddNodes([1]) #create a submodelpart for neumann boundary conditions bcs_neumann = mp.CreateSubModelPart("PointLoad3D_neumann") bcs_neumann.AddNodes([nr_nodes]) bcs_neumann.AddConditions([1]) #create Element for i in range(nr_elements): mp.CreateNewElement("CrBeamElement3D2N", i+1, [i+1,i+2], mp.GetProperties()[0]) #apply constant boundary conditions self._apply_BCs(bcs_xyz,'xyz') self._apply_BCs(bcs_rot,'rotXYZ') Force_Y = -100000.000 self._apply_Neumann_BCs(bcs_neumann,'y',Force_Y) #loop over time time_start = 0.00 time_end = 0.0004 # time_delta = 0.001 time_delta = 0.0001 time_i = time_start time_step = 0 self._set_and_fill_buffer(mp,2,time_delta) x = [] y = [] y_1 = [] while (time_i <= time_end): time_i += time_delta mp.CloneTimeStep(time_i) #solve + compare self._solve_dynamic(mp) self._check_results_dynamic_lumped(mp,time_i,nr_nodes,time_step) time_step += 1 def test_cr_beam_dynamic_consistent_mass_matrix(self): dim = 3 nr_nodes = 11 nr_elements = nr_nodes-1 current_model = KratosMultiphysics.Model() mp = current_model.CreateModelPart("solid_part") mp.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dim) self._add_variables(mp) self._apply_material_properties(mp,dim) mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.USE_CONSISTENT_MASS_MATRIX,True) #create nodes dx = 1.00 / nr_elements for i in range(nr_nodes): mp.CreateNewNode(i+1,i*dx,0.00,0.00) #add dofs self._add_dofs(mp) #create condition mp.CreateNewCondition("PointLoadCondition3D1N",1,[nr_nodes],mp.GetProperties()[0]) #create submodelparts for dirichlet boundary conditions bcs_xyz = mp.CreateSubModelPart("Dirichlet_XYZ") bcs_xyz.AddNodes([1]) bcs_rot = mp.CreateSubModelPart("Dirichlet_RotAll") bcs_rot.AddNodes([1]) #create a submodelpart for neumann boundary conditions bcs_neumann = mp.CreateSubModelPart("PointLoad3D_neumann") bcs_neumann.AddNodes([nr_nodes]) bcs_neumann.AddConditions([1]) #create Element for i in range(nr_elements): mp.CreateNewElement("CrBeamElement3D2N", i+1, [i+1,i+2], mp.GetProperties()[0]) #apply constant boundary conditions self._apply_BCs(bcs_xyz,'xyz') self._apply_BCs(bcs_rot,'rotXYZ') Force_Y = -100000.000 self._apply_Neumann_BCs(bcs_neumann,'y',Force_Y) #loop over time time_start = 0.00 time_end = 0.0004 # time_delta = 0.001 time_delta = 0.0001 time_i = time_start time_step = 0 self._set_and_fill_buffer(mp,2,time_delta) x = [] y = [] y_1 = [] while (time_i <= time_end): time_i += time_delta mp.CloneTimeStep(time_i) #solve + compare self._solve_dynamic(mp) self._check_results_dynamic(mp,time_i,nr_nodes,time_step) time_step += 1 def test_cr_beam_dynamic_explicit(self): dim = 3 nr_nodes = 11 nr_elements = nr_nodes-1 current_model = KratosMultiphysics.Model() mp = current_model.CreateModelPart("solid_part") mp.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dim) self._add_variables(mp) _add_explicit_variables(mp) self._apply_material_properties(mp,dim) mp.GetProperties()[0].SetValue(StructuralMechanicsApplication.USE_CONSISTENT_MASS_MATRIX,True) #create nodes dx = 1.00 / nr_elements for i in range(nr_nodes): mp.CreateNewNode(i+1,i*dx,0.00,0.00) #add dofs self._add_dofs(mp) #create condition mp.CreateNewCondition("PointLoadCondition3D1N",1,[nr_nodes],mp.GetProperties()[0]) #create submodelparts for dirichlet boundary conditions bcs_xyz = mp.CreateSubModelPart("Dirichlet_XYZ") bcs_xyz.AddNodes([1]) bcs_rot = mp.CreateSubModelPart("Dirichlet_RotAll") bcs_rot.AddNodes([1]) #create a submodelpart for neumann boundary conditions bcs_neumann = mp.CreateSubModelPart("PointLoad3D_neumann") bcs_neumann.AddNodes([nr_nodes]) bcs_neumann.AddConditions([1]) #create Element for i in range(nr_elements): mp.CreateNewElement("CrLinearBeamElement3D2N", i+1, [i+1,i+2], mp.GetProperties()[0]) #apply constant boundary conditions self._apply_BCs(bcs_xyz,'xyz') self._apply_BCs(bcs_rot,'rotXYZ') Force_Y = -100000.000 self._apply_Neumann_BCs(bcs_neumann,'y',Force_Y) #loop over time time_start = 0.00 time_delta = 0.000015 time_end = time_delta*12 # time_delta = 0.001 time_i = time_start time_step = 0 self._set_and_fill_buffer(mp,2,time_delta) strategy_expl = _create_dynamic_explicit_strategy(mp) while (time_i <= time_end): time_i += time_delta mp.CloneTimeStep(time_i) #solve + compare strategy_expl.Solve() self._check_results_dynamic_explicit(mp,time_i,nr_nodes,time_step) time_step += 1 class StaticPatchTestBeam3D2N(BasePatchTestCrBeam3D2N): def test_cr_beam_linear(self): dim = 3 nr_nodes = 11 nr_elements = nr_nodes-1 current_model = KratosMultiphysics.Model() mp = current_model.CreateModelPart("solid_part") mp.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dim) self._add_variables(mp) self._apply_material_properties(mp,dim) #create nodes dx = 1.20 / nr_elements for i in range(nr_nodes): mp.CreateNewNode(i+1,i*dx,0.00,0.00) #add dofs self._add_dofs(mp) #create condition mp.CreateNewCondition("PointLoadCondition3D1N",1,[nr_nodes],mp.GetProperties()[0]) #create submodelparts for dirichlet boundary conditions bcs_xyz = mp.CreateSubModelPart("Dirichlet_XYZ") bcs_xyz.AddNodes([1]) bcs_rot = mp.CreateSubModelPart("Dirichlet_RotAll") bcs_rot.AddNodes([1]) #create a submodelpart for neumann boundary conditions bcs_neumann = mp.CreateSubModelPart("PointLoad3D_neumann") bcs_neumann.AddNodes([nr_nodes]) bcs_neumann.AddConditions([1]) #create Element for i in range(nr_elements): mp.CreateNewElement("CrLinearBeamElement3D2N", i+1, [i+1,i+2], mp.GetProperties()[0]) #apply boundary conditions Force_Y = -400000.00 self._apply_BCs(bcs_xyz,'xyz') self._apply_BCs(bcs_rot,'rotXYZ') self._apply_Neumann_BCs(bcs_neumann,'y',Force_Y) #solve + compare self._solve_linear(mp) self._check_results_linear(mp,nr_nodes) def test_cr_beam_linear_local_axis2(self): dim = 3 nr_nodes = 11 nr_elements = nr_nodes-1 current_model = KratosMultiphysics.Model() mp = current_model.CreateModelPart("solid_part") mp.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dim) self._add_variables(mp) self._apply_material_properties(mp,dim) #create nodes dx = 1.20 / nr_elements for i in range(nr_nodes): mp.CreateNewNode(i+1,i*dx,0.00,0.00) #add dofs self._add_dofs(mp) #create condition mp.CreateNewCondition("PointLoadCondition3D1N",1,[nr_nodes],mp.GetProperties()[0]) #create submodelparts for dirichlet boundary conditions bcs_xyz = mp.CreateSubModelPart("Dirichlet_XYZ") bcs_xyz.AddNodes([1]) bcs_rot = mp.CreateSubModelPart("Dirichlet_RotAll") bcs_rot.AddNodes([1]) #create a submodelpart for neumann boundary conditions bcs_neumann = mp.CreateSubModelPart("PointLoad3D_neumann") bcs_neumann.AddNodes([nr_nodes]) bcs_neumann.AddConditions([1]) #create Element for i in range(nr_elements): mp.CreateNewElement("CrLinearBeamElement3D2N", i+1, [i+1,i+2], mp.GetProperties()[0]) #apply local_axis_2 elemental data for i in range(nr_elements): self._apply_elemental_data(mp.GetElement(i+1)) #apply boundary conditions Force_Y = -400000.00 self._apply_BCs(bcs_xyz,'xyz') self._apply_BCs(bcs_rot,'rotXYZ') self._apply_Neumann_BCs(bcs_neumann,'y',Force_Y) #solve + compare self._solve_linear(mp) self._check_results_linear(mp,nr_nodes) def test_cr_beam_nonlinear(self): dim = 3 nr_nodes = 21 nr_elements = nr_nodes-1 current_model = KratosMultiphysics.Model() mp = current_model.CreateModelPart("solid_part") mp.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dim) self._add_variables(mp) self._apply_material_properties(mp,dim) #create nodes dx = 1.00 / nr_elements for i in range(nr_nodes): mp.CreateNewNode(i+1,i*dx,0.00,0.00) #add dofs self._add_dofs(mp) #create condition mp.CreateNewCondition("PointMomentCondition3D1N",1,[nr_nodes],mp.GetProperties()[0]) #create submodelparts for dirichlet boundary conditions bcs_xyz = mp.CreateSubModelPart("Dirichlet_XYZ") bcs_xyz.AddNodes([1]) bcs_rot = mp.CreateSubModelPart("Dirichlet_RotAll") bcs_rot.AddNodes([1]) #create Element for i in range(nr_elements): mp.CreateNewElement("CrBeamElement3D2N", i+1, [i+1,i+2], mp.GetProperties()[0]) #apply constant boundary conditions self._apply_BCs(bcs_xyz,'xyz') self._apply_BCs(bcs_rot,'rotXYZ') #incrementally increase load -> nonlinear case Moment_Z = 25000.00 time_start = 0.00 time_end = 5 time_delta = 1 time_i = time_start time_step = 0 while (time_i <= time_end): time_i += time_delta #apply non-constant boundary conditions Moment_i = Moment_Z*time_i mp.Nodes[nr_nodes].SetSolutionStepValue(StructuralMechanicsApplication. POINT_MOMENT_Z,0,Moment_i) #solve + compare self._solve_nonlinear(mp) self._check_results_nonlinear(mp,time_step,Moment_i,nr_nodes) time_step += 1 def test_cr_beam_linear_moment_hinge(self): dim = 2 nr_nodes = 3 nr_elements = nr_nodes-1 current_model = KratosMultiphysics.Model() mp = current_model.CreateModelPart("solid_part") mp.ProcessInfo.SetValue(KratosMultiphysics.DOMAIN_SIZE, dim) self._add_variables(mp) self._apply_material_properties(mp,dim) #create nodes dx = 2.20 / nr_elements for i in range(nr_nodes): mp.CreateNewNode(i+1,i*dx,0.00,0.00) #add dofs self._add_dofs(mp) #create condition mp.CreateNewCondition("PointLoadCondition3D1N",1,[nr_nodes-1],mp.GetProperties()[0]) #create submodelparts for dirichlet boundary conditions bcs_xyz = mp.CreateSubModelPart("Dirichlet_XYZ") bcs_xyz.AddNodes([1,nr_nodes]) bcs_rot = mp.CreateSubModelPart("Dirichlet_RotAll") bcs_rot.AddNodes([1,nr_nodes]) #create a submodelpart for neumann boundary conditions bcs_neumann = mp.CreateSubModelPart("PointLoad3D_neumann") bcs_neumann.AddNodes([nr_nodes-1]) bcs_neumann.AddConditions([1]) #create Element for i in range(nr_elements): mp.CreateNewElement("CrLinearBeamElement3D2N", i+1, [i+1,i+2], mp.GetProperties()[0]) #apply condensation
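The linear tests above compare against the Euler-Bernoulli cantilever tip deflection delta = F*L^3 / (3*E*I). A quick standalone check of the hard-coded expectation in _check_results_linear, with the numbers taken from _apply_material_properties and the test setup:

F = -400000.00   # tip point load [N]
L = 1.20         # cantilever length [m] (nr_nodes=11, dx=1.20/10)
E = 210e9        # YOUNG_MODULUS [Pa]
I = 0.00001      # bending inertia I33 [m^4]
delta = F * L**3 / (3 * E * I)
print(delta)     # ~ -0.10971 m, the disp_y_analytical used in the assert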
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Integration tests for gcs_ocn_bq_ingest""" import json import os import uuid from typing import List import pytest from google.cloud import bigquery from google.cloud import error_reporting from google.cloud import storage import gcs_ocn_bq_ingest.common.ordering import gcs_ocn_bq_ingest.common.utils TEST_DIR = os.path.realpath(os.path.dirname(__file__)) LOAD_JOB_POLLING_TIMEOUT = 10 # seconds @pytest.fixture(scope="package") def bq() -> bigquery.Client: """BigQuery Client""" return bigquery.Client(location="US") @pytest.fixture(scope="package") def gcs() -> storage.Client: """GCS Client""" return storage.Client() @pytest.fixture(scope="package") def error() -> error_reporting.Client: """Error Reporting Client""" return error_reporting.Client() @pytest.fixture def gcs_bucket(request, gcs: storage.Client) -> storage.Bucket: """GCS bucket for test artifacts""" bucket = gcs.create_bucket(f"test_gcs_ocn_bq_ingest_{str(uuid.uuid4())}") bucket.versioning_enabled = True bucket.patch() # override default field delimiter at bucket level load_config_json = { "fieldDelimiter": "|", } load_json_blob: storage.Blob = bucket.blob("_config/load.json") load_json_blob.upload_from_string(json.dumps(load_config_json)) def teardown(): # Since bucket has object versioning enabled, you must # delete all versions of objects before you can delete the bucket. for blob in gcs.list_blobs(bucket, versions=True): blob.delete() bucket.delete(force=True) request.addfinalizer(teardown) return bucket @pytest.fixture def mock_env(gcs, monkeypatch): """ environment variable mocks All tests use this fixture; it is specified in the pytest.ini file as: [pytest] usefixtures = mock_env For more information on module-wide fixtures, see: https://docs.pytest.org/en/stable/fixture.html#use-fixtures-in-classes-and-modules-with-usefixtures """ # Infer project from the gcs client application default credentials.
monkeypatch.setenv("GCP_PROJECT", gcs.project) monkeypatch.setenv("FUNCTION_NAME", "integration-test") monkeypatch.setenv("FUNCTION_TIMEOUT_SEC", "540") monkeypatch.setenv("BQ_PROJECT", gcs.project) @pytest.fixture def ordered_mock_env(monkeypatch): """environment variable mocks""" monkeypatch.setenv("ORDER_PER_TABLE", "TRUE") @pytest.fixture def dest_dataset(request, bq, monkeypatch): random_dataset = (f"test_bq_ingest_gcf_" f"{str(uuid.uuid4())[:8].replace('-', '_')}") if os.getenv('GCP_PROJECT') is None: monkeypatch.setenv("GCP_PROJECT", bq.project) dataset = bigquery.Dataset(f"{os.getenv('GCP_PROJECT')}" f".{random_dataset}") dataset.location = "US" bq.create_dataset(dataset) print(f"created dataset {dataset.dataset_id}") def teardown(): bq.delete_dataset(dataset, delete_contents=True, not_found_ok=True) request.addfinalizer(teardown) return dataset @pytest.fixture def dest_table(monkeypatch, request, bq, dest_dataset) -> bigquery.Table: with open(os.path.join(TEST_DIR, "resources", "nation_schema.json")) as schema_file: schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema( json.load(schema_file)) if os.getenv('GCP_PROJECT') is None: monkeypatch.setenv("GCP_PROJECT", bq.project) table = bq.create_table( bigquery.Table( f"{os.getenv('GCP_PROJECT')}" f".{dest_dataset.dataset_id}.cf_test_nation_" f"{str(uuid.uuid4()).replace('-', '_')}", schema=schema, )) def teardown(): bq.delete_table(table, not_found_ok=True) request.addfinalizer(teardown) return table @pytest.fixture def gcs_data(gcs_bucket, dest_dataset, dest_table) -> storage.Blob: data_objs: List[storage.Blob] = [] for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]: data_obj: storage.Blob = gcs_bucket.blob("/".join([ f"{dest_dataset.project}.{dest_dataset.dataset_id}", dest_table.table_id, test_file ])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nation", test_file)) data_objs.append(data_obj) return data_objs @pytest.fixture def gcs_data_under_sub_dirs(gcs_bucket, dest_dataset, dest_table) -> storage.Blob: data_objs = [] for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]: data_obj: storage.Blob = gcs_bucket.blob("/".join([ f"{dest_dataset.project}.{dest_dataset.dataset_id}", dest_table.table_id, "foo", "bar", "baz", test_file ])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nation", test_file)) data_objs.append(data_obj) return data_objs @pytest.fixture def gcs_truncating_load_config(gcs_bucket, dest_dataset, dest_table) -> List[storage.Blob]: config_objs: List[storage.Blob] = [] config_obj: storage.Blob = gcs_bucket.blob("/".join([ dest_dataset.dataset_id, dest_table.table_id, "_config", "load.json", ])) config_obj.upload_from_string( json.dumps({"writeDisposition": "WRITE_TRUNCATE"})) config_objs.append(config_obj) return config_objs @pytest.fixture def gcs_batched_data(gcs_bucket, dest_dataset, dest_table) -> List[storage.Blob]: """ upload two batches of data """ data_objs: List[storage.Blob] = [] for batch in ["batch0", "batch1"]: for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]: data_obj: storage.Blob = gcs_bucket.blob("/".join([ dest_dataset.dataset_id, dest_table.table_id, batch, test_file ])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nation", test_file)) data_objs.append(data_obj) return data_objs @pytest.fixture def gcs_external_config(gcs_bucket, dest_dataset, dest_table) -> List[storage.Blob]: config_objs = [] sql_obj = gcs_bucket.blob("/".join([ 
f"{dest_dataset.project}.{dest_dataset.dataset_id}", dest_table.table_id, "_config", "bq_transform.sql", ])) sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext" sql_obj.upload_from_string(sql) config_obj = gcs_bucket.blob("/".join([ f"{dest_dataset.project}.{dest_dataset.dataset_id}", dest_table.table_id, "_config", "external.json" ])) with open(os.path.join(TEST_DIR, "resources", "nation_schema.json")) as schema: fields = json.load(schema) config = { "schema": { "fields": fields }, "csvOptions": { "allowJaggedRows": False, "allowQuotedNewlines": False, "encoding": "UTF-8", "fieldDelimiter": "|", "skipLeadingRows": 0, }, "sourceFormat": "CSV", "sourceUris": ["REPLACEME"], } config_obj.upload_from_string(json.dumps(config)) config_objs.append(sql_obj) config_objs.append(config_obj) return config_objs @pytest.fixture def gcs_destination_config(gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]: """ This tests that a load.json file with destinationTable specified is used to load data. """ config_objs = [] config_obj: storage.Blob = gcs_bucket.blob("/".join([ "_config", "load.json", ])) config_obj.upload_from_string( json.dumps({ "writeDisposition": "WRITE_TRUNCATE", "fieldDelimiter": "|", "destinationTable": { "projectId": dest_partitioned_table.project, "datasetId": dest_partitioned_table.dataset_id, "tableId": dest_partitioned_table.table_id }, "destinationRegex": ( r"(?P<table>.*?)/" # ignore everything leading up to partition r"\$?(?P<yyyy>[\d]{4})/?" # partition year (yyyy) (optional) r"(?P<mm>[\d]{2})?/?" # partition month (mm) (optional) r"(?P<dd>[\d]{2})?/?" # partition day (dd) (optional) r"(?P<hh>[\d]{2})?/?" # partition hour (hh) (optional) ) })) config_objs.append(config_obj) return config_objs @pytest.fixture def gcs_destination_parquet_config( gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]: """ This tests that a load.json file with destinationTable specified is used to load data. :param gcs_bucket: :param dest_dataset: :param dest_partitioned_table: :return: """ destination_regex = ( r"(?P<table>.*?)" # ignore everything leading up to partition r"(?:[\d]{4})?/?" r"(?:[\d]{2})?/?" r"(?:[\d]{2})?/?" r"(?P<batch>[\d]{2})/?" # batch ) config_objs = [] config_obj: storage.Blob = gcs_bucket.blob("/".join([ "_config", "load.json", ])) config_obj.upload_from_string( json.dumps({ "sourceFormat": "PARQUET", "destinationTable": { "projectId": dest_partitioned_table.project, "datasetId": dest_partitioned_table.dataset_id, "tableId": dest_partitioned_table.table_id }, "destinationRegex": destination_regex, })) config_objs.append(config_obj) return config_objs @pytest.fixture def gcs_destination_parquet_config_hive_partitioned( gcs_bucket, dest_dataset, dest_hive_partitioned_table) -> List[storage.Blob]: """ This tests that a load.json file with destinationTable and destinationRegex specified is used to load data. :param gcs_bucket: :param dest_dataset: :param dest_hive_partitioned_table: :return: """ destination_regex = ( r"(?P<table>.*?)/" # ignore everything leading up to partition r"(?P<yyyy>[\d]{4})/" r"(?P<mm>[\d]{2})/" r"(?P<dd>[\d]{2})/" r"(?P<hh>[\d]{2})/" # r"^(?:[\w\-_0-9]+)/(?P<dataset>[\w\-_0-9\.]+)/" # r"(?P<table>[\w\-_0-9]+)/?" # r"(?:incremental|history)?/?" # r"(?:[0-9]{4})?/?" # r"(?:[0-9]{2})?/?" # r"(?:[0-9]{2})?/?" # r"(?:[0-9]{2})?/?" # r"(?P<batch>[0-9]+)/?" 
) config_objs = [] config_obj: storage.Blob = gcs_bucket.blob("/".join([ "_config", "load.json", ])) config_obj.upload_from_string( json.dumps({ "sourceFormat": "PARQUET", "destinationTable": { "projectId": dest_hive_partitioned_table.project, "datasetId": dest_hive_partitioned_table.dataset_id, "tableId": dest_hive_partitioned_table.table_id }, "destinationRegex": destination_regex, "dataSourceName": "some-onprem-data-source" })) config_objs.append(config_obj) return config_objs @pytest.fixture def gcs_destination_parquet_config_partitioned_alternate( gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]: """ This tests that a load.json file with destinationTable and destinationRegex specified is used to load data. :param gcs_bucket: :param dest_dataset: :param dest_hive_partitioned_table: :return: """ destination_regex = ( r"(?P<table>.*?)/" # ignore everything leading up to partition r"year=(?P<yyyy>[\d]{4})/" r"month=(?P<mm>[\d]{1,2})/" r"day=(?P<dd>[\d]{1,2})/" r"hr=(?P<hh>[\d]{1,2})/") config_objs = [] config_obj: storage.Blob = gcs_bucket.blob("/".join([ "_config", "load.json", ])) config_obj.upload_from_string( json.dumps({ "sourceFormat": "PARQUET", "destinationTable": { "projectId": dest_partitioned_table.project, "datasetId": dest_partitioned_table.dataset_id, "tableId": dest_partitioned_table.table_id }, "destinationRegex": destination_regex, "dataSourceName": "some-onprem-data-source" })) config_objs.append(config_obj) return config_objs @pytest.fixture def gcs_partitioned_data(gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]: data_objs = [] for partition in ["$2017041101", "$2017041102"]: for test_file in ["nyc_311.csv", "_SUCCESS"]: data_obj: storage.Blob = gcs_bucket.blob("/".join([ dest_dataset.dataset_id, dest_partitioned_table.table_id, partition, test_file ])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nyc_311", partition, test_file)) data_objs.append(data_obj) dot_blob: storage.Blob = gcs_bucket.blob("/".join([ dest_dataset.dataset_id, dest_partitioned_table.table_id, partition, ".file_that_starts_with_dot" ])) dot_blob.upload_from_string("") data_objs.append(dot_blob) return data_objs @pytest.fixture def gcs_partitioned_data_allow_jagged( gcs_bucket, dest_dataset, dest_partitioned_table_allow_jagged) -> List[storage.Blob]: data_objs = [] for partition in ["$2017041101", "$2017041102"]: for test_file in ["nyc_311.csv.gz", "_SUCCESS"]: data_obj: storage.Blob = gcs_bucket.blob("/".join([ dest_dataset.dataset_id, dest_partitioned_table_allow_jagged.table_id, partition, test_file ])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nyc_311", partition, test_file)) data_objs.append(data_obj) return data_objs @pytest.fixture def gcs_partitioned_parquet_data(gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]: data_objs = [] for partition in ["$2017041101", "$2017041102"]: for test_file in [ "nyc311_25_rows_00.parquet", "nyc311_25_rows_01.parquet", "_SUCCESS" ]: data_obj: storage.Blob = gcs_bucket.blob("/".join( [partition, test_file])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nyc_311", partition, test_file)) data_objs.append(data_obj) return data_objs @pytest.fixture def gcs_split_path_partitioned_data( gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]: data_objs = [] for partition in ["$2017041101", "$2017041102"]: for test_file in ["nyc_311.csv", "_SUCCESS"]: data_obj: storage.Blob = 
gcs_bucket.blob("/".join([ "foo", "bar", "baz", partition[1:5], # year partition[5:7], # month partition[7:9], # day partition[9:], # hour "hive_part_column=9999", test_file ])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nyc_311", partition, test_file)) data_objs.append(data_obj) return data_objs @pytest.fixture def gcs_split_path_partitioned_parquet_data( gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]: data_objs = [] for partition in ["$2017041101", "$2017041102"]: for test_file in [ "nyc311_25_rows_00.parquet", "nyc311_25_rows_01.parquet" ]: data_obj: storage.Blob = gcs_bucket.blob("/".join([ "foo", "bar", "baz", partition[1:5], # year partition[5:7], # month partition[7:9], # day partition[9:], # batch "hive_part_column=9999", test_file ])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nyc_311", partition, test_file)) data_objs.append(data_obj) # Add _SUCCESS file under the hour partition folder data_obj = gcs_bucket.blob("/".join([ "foo", "bar", "baz", partition[1:5], # year partition[5:7], # month partition[7:9], # day partition[9:], # batch "_SUCCESS" ])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nyc_311", partition, "_SUCCESS")) data_objs.append(data_obj) return data_objs @pytest.fixture def gcs_split_path_partitioned_parquet_data_alternate( gcs_bucket, dest_dataset, dest_partitioned_table) -> List[storage.Blob]: data_objs = [] for partition in ["$2017041101", "$2017041102"]: for test_file in [ "nyc311_25_rows_00.parquet", "nyc311_25_rows_01.parquet" ]: data_obj: storage.Blob = gcs_bucket.blob("/".join([ "foo", "bar", "baz", f"year={partition[1:5]}", # year f"month={partition[5:7]}", # month f"day={partition[7:9]}", # day f"hr={partition[9:]}", # batch test_file ])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nyc_311", partition, test_file)) data_objs.append(data_obj) # Add _SUCCESS file under the hour partition folder data_obj = gcs_bucket.blob("/".join([ "foo", "bar", "baz", f"year={partition[1:5]}", # year f"month={partition[5:7]}", # month f"day={partition[7:9]}", # day f"hr={partition[9:]}", # batch "_SUCCESS" ])) data_obj.upload_from_filename( os.path.join(TEST_DIR, "resources", "test-data", "nyc_311", partition, "_SUCCESS")) data_objs.append(data_obj) return data_objs @pytest.fixture def dest_partitioned_table(bq: bigquery.Client, dest_dataset, monkeypatch) -> bigquery.Table: public_table: bigquery.Table = bq.get_table( bigquery.TableReference.from_string( "bigquery-public-data.new_york_311.311_service_requests")) schema = public_table.schema if os.getenv('GCP_PROJECT') is None: monkeypatch.setenv("GCP_PROJECT", bq.project) table: bigquery.Table = bigquery.Table( f"{os.getenv('GCP_PROJECT')}" f".{dest_dataset.dataset_id}.cf_test_nyc_311_" f"{str(uuid.uuid4()).replace('-', '_')}", schema=schema, ) table.time_partitioning = bigquery.TimePartitioning() table.time_partitioning.type_ = bigquery.TimePartitioningType.HOUR table.time_partitioning.field = "created_date" table = bq.create_table(table) return table @pytest.fixture def dest_hive_partitioned_table(bq: bigquery.Client, dest_dataset, monkeypatch) -> bigquery.Table: public_table: bigquery.Table = bq.get_table( bigquery.TableReference.from_string( "bigquery-public-data.new_york_311.311_service_requests")) schema =
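# A minimal sketch (not part of the test suite) of how a hive-style
# destinationRegex like the one in
# gcs_destination_parquet_config_partitioned_alternate can map a GCS object
# path to a BigQuery HOUR partition decorator. The resolve_partition_decorator
# helper below is hypothetical, for illustration only; the real parsing lives
# in gcs_ocn_bq_ingest.common.utils.
import re
from typing import Optional

HIVE_STYLE_DESTINATION_REGEX = (
    r"(?P<table>.*?)/"  # ignore everything leading up to the partition
    r"year=(?P<yyyy>[\d]{4})/"
    r"month=(?P<mm>[\d]{1,2})/"
    r"day=(?P<dd>[\d]{1,2})/"
    r"hr=(?P<hh>[\d]{1,2})/")


def resolve_partition_decorator(object_path: str) -> Optional[str]:
    """Return a $YYYYMMDDHH partition decorator parsed from the object path."""
    match = re.match(HIVE_STYLE_DESTINATION_REGEX, object_path)
    if not match:
        return None
    g = match.groupdict()
    return "${}{:02d}{:02d}{:02d}".format(g["yyyy"], int(g["mm"]),
                                          int(g["dd"]), int(g["hh"]))


# e.g. resolve_partition_decorator(
#     "foo/bar/baz/year=2017/month=04/day=11/hr=01/nyc311_25_rows_00.parquet")
# returns "$2017041101", matching the layout uploaded by the
# gcs_split_path_partitioned_parquet_data_alternate fixture.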
import sys
import os
import pyperclip
from PyQt5.QtWidgets import (QAction, QLabel, QMainWindow, QMessageBox,
                             QWidget, QLineEdit, QPushButton, QApplication,
                             QScrollArea, QVBoxLayout, QCheckBox, QMenuBar)
from PyQt5.QtGui import QIcon, QPixmap, QFont, QDesktopServices
from PyQt5.QtCore import QUrl, Qt
import Functions


class SignInApp(QMainWindow):
    def __init__(self):
        super().__init__()
        self.initializeMainWindow()
        self.initializeUI()

    def initializeMainWindow(self):
        # Initializing Main Window
        self.setWindowTitle("Random Password Generator - Sign In | Log In")
        self.setWindowIcon(QIcon("Files/icon.png"))
        self.resize(271, 364)  # Width, Height
        # Initializing Icon
        self.Icon = QIcon()
        self.Icon.addPixmap(QPixmap("Files/icon.png"), QIcon.Mode.Normal, QIcon.State.Off)
        # Initializing Font
        self.headingFont = QFont()
        self.headingFont.setFamily("Arial Black")
        self.headingFont.setPointSize(20)
        self.headingFont.setBold(True)
        self.headingFont.setUnderline(False)
        self.headingFont.setWeight(75)

    def initializeUI(self):
        # Initializing Labels
        self.mainLabel = QLabel(self)
        self.signUpLabel = QLabel(self)
        # Initializing Text Fields
        self.usernameLineEdit = QLineEdit(self)
        self.passwordLineEdit = QLineEdit(self)
        # Initializing Buttons
        self.signInButton = QPushButton(self)
        self.signInButton.clicked.connect(self.getData)
        self.signUpButton = QPushButton(self)
        self.signUpButton.clicked.connect(self.runSignUpApp)
        self.deleteUserButton = QPushButton(self)
        self.deleteUserButton.clicked.connect(self.runDeleteUserApp)
        # Customizing Labels
        self.mainLabel.setGeometry(20, 10, 251, 41)  # x, y, width, height
        self.mainLabel.setText("Sign In | Log In")
        self.mainLabel.setFont(self.headingFont)
        self.mainLabel.show()
        self.signUpLabel.setGeometry(70, 340, 130, 21)  # x, y, width, height
        self.signUpLabel.setText("Don't have an account?")
        self.signUpLabel.show()
        # Customizing Text Fields
        self.usernameLineEdit.setGeometry(10, 170, 251, 23)  # x, y, width, height
        self.usernameLineEdit.setPlaceholderText("Username")
        self.usernameLineEdit.show()
        self.passwordLineEdit.setGeometry(10, 210, 251, 23)  # x, y, width, height
        self.passwordLineEdit.setPlaceholderText("Password")
        self.passwordLineEdit.setEchoMode(QLineEdit.EchoMode.Password)
        self.passwordLineEdit.show()
        # Customizing Buttons
        self.signInButton.setGeometry(90, 250, 91, 41)  # x, y, width, height
        self.signInButton.setText("Sign In")
        self.signInButton.setCursor(Qt.PointingHandCursor)
        self.signInButton.show()
        self.signUpButton.setGeometry(210, 340, 61, 21)  # x, y, width, height
        self.signUpButton.setText("Sign Up")
        self.signUpButton.setCursor(Qt.PointingHandCursor)
        self.signUpButton.show()
        self.deleteUserButton.setGeometry(180, 310, 91, 21)  # x, y, width, height
        self.deleteUserButton.setText("Delete User")
        self.deleteUserButton.setCursor(Qt.PointingHandCursor)
        self.deleteUserButton.show()

    def getData(self):
        self.username = self.usernameLineEdit.text()
        self.password = self.passwordLineEdit.text()
        self.isCorrect = Functions.validateSignIn(self.username, self.password)
        if self.isCorrect == True:
            self.runMainApp()
        else:
            self.incorrectInfo()

    def incorrectInfo(self):
        self.msgBox = QMessageBox(self)
        self.msgBox.setIcon(QMessageBox.Icon.Critical)
        self.msgBox.resize(50, 50)  # width, height
        self.msgBox.setWindowTitle("Error!")
        self.msgBox.setText("Incorrect Information!")
        self.msgBox.setStandardButtons(QMessageBox.Ok)
        self.msgBox.exec()

    def runMainApp(self):
        self.close()
        global username
        username = self.username
        self.MainWindow = MainApp()
        self.MainWindow.show()

    def 
runSignUpApp(self): self.close() self.MainWindow = SignUpApp() self.MainWindow.show() def runDeleteUserApp(self): self.close() self.MainWindow = DeleteUserApp() self.MainWindow.show() def keyPressEvent(self, e): if e.key() == Qt.Key_Escape: self.close() if e.key() == Qt.Key_Return: self.getData() class SignUpApp(QMainWindow): def __init__(self): super().__init__() self.initializeMainWindow() self.initializeUI() def initializeMainWindow(self): # Initializing Main Window self.setWindowTitle("Random Password Generator - Sign Up") self.setWindowIcon(QIcon("Files/icon.png")) self.resize(271, 364) #Width, Height # Initializing Icon self.Icon = QIcon() self.Icon.addPixmap(QPixmap("Files/icon.png"), QIcon.Mode.Normal, QIcon.State.Off) # Initializing Font self.headingFont = QFont() self.headingFont.setFamily("Arial Black") self.headingFont.setPointSize(19) self.headingFont.setBold(True) self.headingFont.setUnderline(False) self.headingFont.setWeight(75) def initializeUI(self): # Initializing Labels self.mainLabel = QLabel(self) self.signInLabel = QLabel(self) # Initializing Text Fields self.usernameLineEdit = QLineEdit(self) self.passwordLineEdit = QLineEdit(self) self.confirmPasswordLineEdit = QLineEdit(self) # Initializing Buttons self.signUpButton = QPushButton(self) self.signUpButton.clicked.connect(self.getData) self.signInButton = QPushButton(self) self.signInButton.clicked.connect(self.runSignInApp) # Customizing Labels self.mainLabel.setGeometry(10, 10, 251, 41) # x, y, width, height self.mainLabel.setText("Sign Up | Register") self.mainLabel.setFont(self.headingFont) self.mainLabel.show() self.signInLabel.setGeometry(67, 340, 140, 21) # x, y, width, height self.signInLabel.setText("Already have an account?") self.signInLabel.show() # Customizing Buttons self.signUpButton.setGeometry(90, 250, 91, 41) # x, y, width, height self.signUpButton.setText("Sign Up") self.signUpButton.setCursor(Qt.PointingHandCursor) self.signUpButton.show() self.signInButton.setGeometry(210, 340, 61, 21) # x, y, width, height self.signInButton.setText("Sign In") self.signInButton.setCursor(Qt.PointingHandCursor) self.signInButton.show() # Customizing Text Fields self.usernameLineEdit.setGeometry(10, 130, 251, 23) # x, y, width, height self.usernameLineEdit.setPlaceholderText("Username") self.usernameLineEdit.show() self.passwordLineEdit.setGeometry(10, 170, 251, 23) # x, y, width, height self.passwordLineEdit.setPlaceholderText("Password") self.passwordLineEdit.setEchoMode(QLineEdit.EchoMode.Password) self.passwordLineEdit.show() self.confirmPasswordLineEdit.setGeometry(10, 210, 251, 23) # x, y, width, height self.confirmPasswordLineEdit.setPlaceholderText("Confirm Password") self.confirmPasswordLineEdit.setEchoMode(QLineEdit.EchoMode.Password) self.confirmPasswordLineEdit.show() def getData(self): self.username = self.usernameLineEdit.text() self.password = self.passwordLineEdit.text() self.confirmPassword = self.confirmPasswordLineEdit.text() self.isCorrect = Functions.validateSignUp(self.username, self.password, self.confirmPassword) if self.isCorrect == True: self.correctInfo() else: self.incorrectInfo() def correctInfo(self): self.msgBox = QMessageBox(self) self.msgBox.setIcon(QMessageBox.Icon.Information) self.msgBox.resize(50, 50) # width, height self.msgBox.setWindowTitle("Done.") self.msgBox.setText("Your Account has been created, please Log in.") self.msgBox.setStandardButtons(QMessageBox.Ok) self.msgBox.exec() def incorrectInfo(self): self.msgBox = QMessageBox(self) 
        self.msgBox.setIcon(QMessageBox.Icon.Critical)
        self.msgBox.resize(50, 50)  # width, height
        self.msgBox.setWindowTitle("Error!")
        self.msgBox.setText("Please provide valid information and make sure the password fields match. If an account with this username already exists, try logging in instead.")
        self.msgBox.setStandardButtons(QMessageBox.Ok)
        self.msgBox.exec()

    def runSignInApp(self):
        self.close()
        self.MainWindow = SignInApp()
        self.MainWindow.show()

    def keyPressEvent(self, e):
        if e.key() == Qt.Key_Escape:
            self.close()
        if e.key() == Qt.Key_Return:
            self.getData()


class MainApp(QMainWindow):
    def __init__(self):
        super().__init__()
        self.initializeWindow()
        self.initializeUI()

    def initializeWindow(self):
        # Initializing Menu Bar
        self.menuBar = QMenuBar()
        self.setMenuBar(self.menuBar)
        # Initializing New Actions
        self.createNewPassword = QAction("Create", self)
        self.createNewPassword.setShortcut("Ctrl+N")
        self.createNewPassword.setStatusTip("Create New Password")
        self.createNewPassword.triggered.connect(self.runCreateNewApp)
        self.deletePassword = QAction("Delete", self)
        self.deletePassword.setShortcut("Ctrl+Del")
        self.deletePassword.setStatusTip("Delete an existing Password")
        self.deletePassword.triggered.connect(self.runDeleteApp)
        self.openGitHub = QAction("GitHub", self)
        self.openGitHub.setShortcut("Ctrl+H")
        self.openGitHub.setStatusTip("Open GitHub Repository")
        self.openGitHub.triggered.connect(self.openGitHubRepo)
        # Initializing New Menus
        self.passwordMenu = self.menuBar.addMenu("Password")
        self.passwordMenu.addAction(self.createNewPassword)
        self.passwordMenu.addAction(self.deletePassword)
        self.helpMenu = self.menuBar.addMenu("Help")
        self.helpMenu.addAction(self.openGitHub)
        # Initializing Main Window
        self.setWindowTitle("Random Password Generator")
        self.setWindowIcon(QIcon("Files/icon.png"))
        self.resize(271, 364)  # width, height
        # Initializing Scroll Area
        self.scroll = QScrollArea()
        self.widget = QWidget()
        self.vbox = QVBoxLayout()
        self.widget.setLayout(self.vbox)
        self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.scroll.setWidgetResizable(True)
        self.scroll.setWidget(self.widget)
        self.setCentralWidget(self.scroll)
        # Initializing Icon
        self.Icon = QIcon()
        self.Icon.addPixmap(QPixmap("Files/icon.png"), QIcon.Mode.Normal, QIcon.State.Off)

    def initializeUI(self):
        # Initializing Password Buttons
        self.keyCount, self.keyList = Functions.Details(username)
        for i in range(0, self.keyCount):
            button = QPushButton()  # renamed from `object` to avoid shadowing the builtin
            button.clicked.connect(self.passwordButtonClicked)
            button.setObjectName(self.keyList[0])
            button.setText(self.keyList[0])
            self.keyList.pop(0)
            self.vbox.addWidget(button)

    def openGitHubRepo(self):
        QDesktopServices.openUrl(QUrl("https://github.com/Abled-Taha/Random-Password-Generator-GUI"))

    def copied(self):
        self.msgBox = QMessageBox(self)
        self.msgBox.setIcon(QMessageBox.Icon.Information)
        self.msgBox.resize(50, 50)  # width, height
        self.msgBox.setWindowTitle("Copied!")
        self.msgBox.setText("Your password has been copied to your clipboard.")
        self.msgBox.setStandardButtons(QMessageBox.Ok)
        self.msgBox.exec()

    def passwordButtonClicked(self):
        sender = self.sender()  # renamed from `object` to avoid shadowing the builtin
        password = Functions.getPass(sender.objectName(), username)
        pyperclip.copy(password)
        self.copied()

    def runCreateNewApp(self):
        self.close()
        self.MainWindow = CreateNewApp()
        self.MainWindow.show()

    def runDeleteApp(self):
        self.close()
        self.MainWindow = DeleteApp()
        self.MainWindow.show()

    def keyPressEvent(self, e):
        if e.key() == Qt.Key_Escape:
            self.close()


class CreateNewApp(QMainWindow):
    def __init__(self):
        super().__init__()
        self.initializeWindow()
        self.initializeUI()

    def initializeWindow(self):
        # Initializing Main Window
        self.setWindowTitle("Random Password Generator - Create New Password")
        self.setWindowIcon(QIcon("Files/icon.png"))
        self.resize(271, 364)  # width, height
        # Initializing Icon
        self.Icon = QIcon()
        self.Icon.addPixmap(QPixmap("Files/icon.png"), QIcon.Mode.Normal, QIcon.State.Off)

    def initializeUI(self):
        # Initializing Buttons
        self.generatePasswordButton = QPushButton(self)
        self.generatePasswordButton.clicked.connect(self.getData)
        self.backButton = QPushButton(self)
        self.backButton.clicked.connect(self.back)
        # Initializing Text Fields
        self.nameOfPasswordField = QLineEdit(self)
        self.numberOfCharactersField = QLineEdit(self)
        # Initializing Check Boxes
        self.capitalLettersCheckBox = QCheckBox(self)
        self.smallLettersCheckBox = QCheckBox(self)
        self.numbersCheckBox = QCheckBox(self)
        self.symbolsCheckBox = QCheckBox(self)
        # Customizing Buttons
        self.generatePasswordButton.setGeometry(80, 250, 120, 30)  # x, y, width, height
        self.generatePasswordButton.setText("Generate Password")
        self.generatePasswordButton.setCursor(Qt.PointingHandCursor)
        self.generatePasswordButton.show()
        self.backButton.setGeometry(0, 340, 61, 21)  # x, y, width, height
        self.backButton.setText("Go Back")
        self.backButton.setCursor(Qt.PointingHandCursor)
        self.backButton.show()
        # Customizing Text Fields
        self.nameOfPasswordField.setGeometry(10, 45, 200, 30)  # x, y, width, height
        self.nameOfPasswordField.setPlaceholderText("Name the Password.")
        self.nameOfPasswordField.show()
        self.numberOfCharactersField.setGeometry(10, 80, 200, 30)  # x, y, width, height
        self.numberOfCharactersField.setPlaceholderText("How many characters?")
        self.numberOfCharactersField.show()
        # Customizing Check Boxes
        self.capitalLettersCheckBox.setGeometry(10, 120, 200, 20)  # x, y, width, height
        self.capitalLettersCheckBox.setText("Should Contain Capital Letters?")
        self.capitalLettersCheckBox.setCursor(Qt.PointingHandCursor)
        self.capitalLettersCheckBox.show()
        self.smallLettersCheckBox.setGeometry(10, 140, 200, 20)  # x, y, width, height
        self.smallLettersCheckBox.setText("Should Contain Small Letters?")
        self.smallLettersCheckBox.setCursor(Qt.PointingHandCursor)
        self.smallLettersCheckBox.show()
        self.numbersCheckBox.setGeometry(10, 160, 200, 20)  # x, y, width, height
        self.numbersCheckBox.setText("Should Contain Numbers?")
        self.numbersCheckBox.setCursor(Qt.PointingHandCursor)
        self.numbersCheckBox.show()
        self.symbolsCheckBox.setGeometry(10, 180, 200, 20)  # x, y, width, height
        self.symbolsCheckBox.setText("Should Contain Symbols?")
        self.symbolsCheckBox.setCursor(Qt.PointingHandCursor)
        self.symbolsCheckBox.show()

    def getData(self):
        try:
            self.nameOfPasswordFieldText = str(self.nameOfPasswordField.text())
            self.numberOfCharactersFieldText = int(self.numberOfCharactersField.text())
        except ValueError:
            # Bail out early; without a valid character count, the call to
            # Functions.createPassword below would fail with an AttributeError.
            self.incorrectInfo()
            return
        self.isCapitalChecked = self.capitalLettersCheckBox.isChecked()
        self.isSmallChecked = self.smallLettersCheckBox.isChecked()
        self.isNumbersChecked = self.numbersCheckBox.isChecked()
        self.isSymbolsChecked = self.symbolsCheckBox.isChecked()
        allDone = Functions.createPassword(self.nameOfPasswordFieldText, self.numberOfCharactersFieldText, self.isCapitalChecked, self.isSmallChecked, self.isNumbersChecked, self.isSymbolsChecked, username)
        if allDone == True:
            self.done()
        else:
            return self.incorrectInfo()

    def back(self):
        self.close()
        self.MainWindow = MainApp()
        self.MainWindow.show()

    def done(self):
        self.msgBox = 
QMessageBox(self) self.msgBox.setIcon(QMessageBox.Icon.Information) self.msgBox.resize(50, 50) # width, height self.msgBox.setWindowTitle("Done!") self.msgBox.setText("Your New Password has been created.") self.msgBox.setStandardButtons(QMessageBox.Ok) self.msgBox.exec() def incorrectInfo(self): self.msgBox = QMessageBox(self) self.msgBox.setIcon(QMessageBox.Icon.Critical) self.msgBox.resize(50, 50) # width, height self.msgBox.setWindowTitle("Error!") self.msgBox.setText("Please provide valid Information.") self.msgBox.setStandardButtons(QMessageBox.Ok) self.msgBox.exec() def keyPressEvent(self, e): if e.key() == Qt.Key_Escape: self.back() if e.key() == Qt.Key_Return: self.getData() class DeleteApp(QMainWindow): def __init__(self): super().__init__() self.initializeWindow() self.initializeUI() def initializeWindow(self): # Initializing Main Window self.setWindowTitle("Random Password Generator - Delete Password") self.setWindowIcon(QIcon("Files/icon.png")) self.resize(271, 364) #Width, Height # Initializing Icon self.Icon = QIcon() self.Icon.addPixmap(QPixmap("Files/icon.png"), QIcon.Mode.Normal, QIcon.State.Off) def initializeUI(self): # Initializing Buttons self.deleteButton = QPushButton(self) self.deleteButton.clicked.connect(self.getData) self.backButton = QPushButton(self) self.backButton.clicked.connect(self.back) # Initializing Text Fields self.nameOfPasswordTextField = QLineEdit(self) # Customizing Buttons self.deleteButton.setGeometry(85, 200, 100, 30) # x, y, width, height self.deleteButton.setText("Delete Password") self.deleteButton.setCursor(Qt.PointingHandCursor) self.deleteButton.show() self.backButton.setGeometry(0, 340, 61, 21) # x, y, width, height self.backButton.setText("Go Back") self.backButton.setCursor(Qt.PointingHandCursor) self.backButton.show() # Customizing Text Fields self.nameOfPasswordTextField.setGeometry(35, 80, 200, 30) # x, y, width, height self.nameOfPasswordTextField.setPlaceholderText("Name Of Password?") self.nameOfPasswordTextField.show() def getData(self): self.nameOfPasswordTextFieldText = self.nameOfPasswordTextField.text() isDone = Functions.deletePassword(self.nameOfPasswordTextFieldText, username) if isDone: self.done() else: self.incorrectInfo() def back(self): self.close() self.MainWindow = MainApp() self.MainWindow.show() def done(self): self.msgBox = QMessageBox(self) self.msgBox.setIcon(QMessageBox.Icon.Information) self.msgBox.resize(50, 50) # width, height self.msgBox.setWindowTitle("Done!") self.msgBox.setText("Your Password has been deleted.") self.msgBox.setStandardButtons(QMessageBox.Ok) self.msgBox.exec() def incorrectInfo(self): self.msgBox = QMessageBox(self) self.msgBox.setIcon(QMessageBox.Icon.Critical) self.msgBox.resize(50, 50) # width, height self.msgBox.setWindowTitle("Error!") self.msgBox.setText("Please provide valid Information.") self.msgBox.setStandardButtons(QMessageBox.Ok) self.msgBox.exec() def keyPressEvent(self, e): if e.key() == Qt.Key_Escape: self.back() if e.key() == Qt.Key_Return: self.getData() class DeleteUserApp(QMainWindow): def __init__(self): super().__init__() self.initializeWindow() self.initializeUI() def initializeWindow(self): # Initializing Main Window self.setWindowTitle("Random Password Generator - Delete User") self.setWindowIcon(QIcon("Files/icon.png")) self.resize(271, 364) #Width, Height # Initializing Icon self.Icon = QIcon() self.Icon.addPixmap(QPixmap("Files/icon.png"), QIcon.Mode.Normal, QIcon.State.Off) def initializeUI(self): # Initializing Buttons self.deleteButton = QPushButton(self) 
self.deleteButton.clicked.connect(self.getData) self.backButton = QPushButton(self) self.backButton.clicked.connect(self.back) # Initializing Text Fields self.nameOfUserTextField = QLineEdit(self) self.passwordTextField = QLineEdit(self) # Customizing Buttons
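# A minimal sketch of a typical PyQt5 entry point for this app; this closing
# block is an assumption for illustration, not the author's verbatim code.
if __name__ == "__main__":
    app = QApplication(sys.argv)  # sys and QApplication are imported at the top
    window = SignInApp()          # the app starts at the sign-in screen
    window.show()
    sys.exit(app.exec())          # hand control to the Qt event loop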
import eventlet
import json
import struct
import cPickle as pickle
from eventlet import wsgi, GreenPool
from eventlet.green import socket
from bricks.flow_des import FlowDesGlobal, FlowDes
from bricks.message import InfoMessage, UpdateMessageByFlow
from scheduler.scheduler import Scheduler, PolicyUtil
from multi_controller_topo import read_mapping_from_file, get_reverse_mapping_from_file, get_link_bw, get_link_ltcy
import consts
import tools
import argparse
import logging
import logger as logger
import time

BW_NUM = 100
nowTime = lambda: int(round(time.time() * 1000))


class GlobalController(object):
    # def __init__(self,topo):
    def __init__(self, local_mapping_file):
        logger.init('./globalhapi.log', logging.INFO)
        self.logger = logger.getLogger('global', logging.INFO)
        # self.topo = topo
        # self.locals = [0,1] #{0:"bala"}
        # self.dp_to_local = {1:0,2:1}
        self.locals = read_mapping_from_file(local_mapping_file).keys()
        self.dp_to_local = get_reverse_mapping_from_file(local_mapping_file)
        self.logger.info(self.locals)
        self.logger.info(self.dp_to_local)
        self.sockets = {}
        self.link_bw = {}  # outer layer
        self.link_ltcy = {}
        self.local_to_buf_size = {}
        self.dp_to_tcam_size = {}
        # self.dp_to_local = {1:0,2:0,3:1,4:1}  # for lineartopo2
        self.flows = {}  # all flows now
        self.flows_new = {}  # flows updating
        self.flows_to_schedule = {}  # flows to update
        self.status_num = 0  # for start update
        # self.flows_new = {"10.0.0.110.0.0.25001":flowdes0}
        self.scheduler = Scheduler()
        self.tag_flows_temp = {}
        self.schedule_result = {}
        eventlet.spawn(self.run_fd_server)

    # tools
    def finished_update_to_flows(self, f):
        self.logger.debug("---in move updating flow in flows")
        self.flows_new.pop(f.flow_id)
        self.flows.update({f.flow_id: f})
        self.logger.debug(self.flows)
        self.logger.debug(self.flows_new)
        self.cal_remain_bw()
        self.logger.debug(self.link_bw)

    def started_update_to_flows_new(self, f):
        self.logger.debug("---in move to scd flow to flows_new")
        self.logger.debug("before")
        self.logger.debug(self.flows_new)
        self.logger.debug(self.flows_to_schedule)
        self.flows_to_schedule.pop(f.flow_id)
        self.flows_new.update({f.flow_id: f})
        self.logger.debug("after")
        self.logger.debug(self.flows_new)
        self.logger.debug(self.flows_to_schedule)

    def make_schedule_topo_for_schedule(self):
        topo = {}
        for dpid, nbr_info in self.link_bw.items():
            dpentry = {}
            for dpnext, bw in nbr_info.items():
                dpentry[dpnext] = {}
                dpentry.update({dpnext: {"bandwidth": bw, "latency": self.link_ltcy[dpid][dpnext]}})
            topo.update({dpid: dpentry})
        return topo

    def make_dp_dict_for_schedule(self):
        dp_dict = {}
        for dpid, local_id in self.dp_to_local.items():
            dpentry = {"flowspace": self.dp_to_tcam_size[dpid], "ctrl": self.dp_to_local[dpid]}
            dp_dict.update({dpid: dpentry})
        return dp_dict

    # for calculating updates
    def schedule_and_update(self):
        self.logger.info("________in schedule--------------")
        topo = self.make_schedule_topo_for_schedule()
        flows = self.flows_to_schedule
        ctrl_dict = self.local_to_buf_size
        dp_dict = self.make_dp_dict_for_schedule()
        self.logger.info("topo:")
        self.logger.info(topo)
        self.logger.info("flows:")
        self.logger.info(flows)
        self.logger.info("ctrl_dict:")
        self.logger.info(ctrl_dict)
        self.logger.info("dp_dict:")
        self.logger.info(dp_dict)
        flows_method, prices = self.scheduler.schedule(topo, flows, ctrl_dict, dp_dict)
        self.schedule_result = {'methods': flows_method, 'prices': prices}
        flows_buf = {}
        flows_tag = {}
        flows_raw = {}
        self.logger.info("let's see the json bug")
        self.logger.info(flows_method)
        self.logger.info("prices")
        self.logger.info(prices)
        for f_id, method in 
flows_method.items(): if method == PolicyUtil.TAG: flows_tag.update({f_id:self.flows_to_schedule[f_id]}) elif method == PolicyUtil.BUFFER: flows_buf.update({f_id:self.flows_to_schedule[f_id]}) elif method == PolicyUtil.RAW: flows_raw.update({f_id:self.flows_to_schedule[f_id]}) self.raw_update(flows_raw) self.buf_del(flows_buf) self.tag0(flows_tag) #cal_remain_bandwidth def cal_remain_bw(self): for dp,linkto in self.link_bw.items(): for dpnext,v in linkto.items(): self.link_bw[dp][dpnext] = BW_NUM for f in self.flows.values(): path = f.new if(path): for i in range(0,len(path)-1): self.logger.info(i) self.logger.info(self.link_bw) self.logger.info(path[i]) self.link_bw[path[i]][path[i+1]] -= f.bw #from aggre_dict to InfoMessage def make_and_send_info(self,aggre_dict,old): # self.logger.info("here is aggre_dict") # self.logger.info(aggre_dict) for (ctrl_id,ups) in aggre_dict.items(): info = InfoMessage(ctrl_id) for (flow_id,up) in ups.items(): # self.logger.info(up) try: f = self.flows_to_schedule[flow_id] except: f = self.flows_new[flow_id] f.ctrl_wait.append(ctrl_id) up_msg = UpdateMessageByFlow(flow_id,f.up_type,f.up_step) up_msg.to_add = up['to_add'] up_msg.to_del = up['to_del'] up_msg.version_tag = 2 # up_msg.version_tag = f.old_version_tag if old else f.new_version_tag info.ums.append(up_msg) # self.logger.info(up_msg) f_des = FlowDes(f.src,f.dst,f.dst_port,f.old,f.new,f.up_type,f.trans_type) info.new_flows.append(f_des) self.send_to_local(ctrl_id,info) #for buf #more where-to-buf can be accomplished here def find_buf_dp(self,f): to_add, to_del = tools.diff_old_new(f.old,f.new) self.logger.info(to_del) to_add_bak,to_del_bak = tools.diff_old_new(f.new,[]) self.logger.info("where to buf") where_to_buf = to_del[0] if to_del else to_del_bak[0] self.logger.info(where_to_buf) return where_to_buf #BUF step 1 def buf_del(self,flows={}): aggre_dict = {} # for f_id,f in self.flows_new.items(): for f_id,f in flows.items(): f.up_type = consts.BUF f.up_step = consts.BUF_DEL to_buf = self.find_buf_dp(f) l,dp,n = to_buf f.ctrl_buf = self.dp_to_local[dp] self.logger.info("flow_id"+str(f.flow_id)) self.logger.info("ctrl_buf" + str(f.ctrl_buf)) aggre_dict = tools.flowkey_to_ctrlkey(aggre_dict,self.dp_to_local,f_id,[],[to_buf]) self.logger.info("her is buf del") self.logger.info(self.flows_to_schedule) self.started_update_to_flows_new(f) self.logger.info(aggre_dict) self.make_and_send_info(aggre_dict,False) #BUF step 2 and 3 def buf_fb_process(self,f_id): aggre_dict = {} f = self.flows_new[f_id] f.ctrl_ok += 1 self.logger.info("-------in buf fb process") self.logger.info(f.up_step) self.logger.info("ctrl_ok" + str(f.ctrl_ok)) self.logger.info("ctrl_wait" + str(f.ctrl_wait)) if(len(f.ctrl_wait) == f.ctrl_ok): f.ctrl_wait = [] f.ctrl_ok = 0 if(f.up_step == consts.BUF_DEL): self.logger.info("buf del over") f.up_step = consts.BUF_ADD to_add, to_del = tools.diff_old_new(f.old,f.new) aggre_dict = tools.flowkey_to_ctrlkey(aggre_dict,self.dp_to_local,f_id,to_add,to_del) # self.logger.info(aggre_dict) self.make_and_send_info(aggre_dict,False) elif(f.up_step == consts.BUF_ADD): self.logger.info("buf add over") f.up_step = consts.BUF_RLS #firstly we send cmd to the ctrls who bufed,but why?? why some buffed from other dps? # info = InfoMessage(f.ctrl_buf) # um = UpdateMessageByFlow(f_id,f.up_type,f.up_step) # info.ums.append(um) # self.send_to_local(f.ctrl_buf,info) # f.ctrl_wait.append(f.ctrl_buf) #now we let all ctrls to sendback,but why???? 
for ctrl in self.locals: info = InfoMessage(ctrl) um = UpdateMessageByFlow(f_id,f.up_type,f.up_step) info.ums.append(um) self.send_to_local(ctrl,info) f.ctrl_wait.append(ctrl) elif(f.up_step == consts.BUF_RLS): f.up_step = None f.up_type = None f.ctrl_buf = None self.logger.info(f.flow_id) self.logger.info("updated over by buf") self.finished_update_to_flows(f) self.logger.info("------------------------buf over time--------------") self.logger.info(nowTime()) #for tag def send_mod_packet_vid_cmd(self,f,dp_tup,ifr): self.logger.info("--------------in send mod vid ----------------") info = InfoMessage(f.ctrl_tag) l,dpid,n = dp_tup um = UpdateMessageByFlow(f.flow_id,f.up_type,f.up_step) um.version_tag = 2 self.logger.info(um.up_step) send_ctrl = self.dp_to_local[dpid] if(ifr): um.if_reverse = True um.to_add.append((n,dpid,l)) self.logger.info(um.to_add) f.ctrl_tag_reverse = send_ctrl else: um.to_add.append(dp_tup) f.ctrl_tag = send_ctrl info.ums.append(um) self.send_to_local(send_ctrl,info) if(send_ctrl not in f.ctrl_wait): f.ctrl_wait.append(send_ctrl) def del_vid_flows_cmd(self,f,dp_tup,ifr): self.logger.info("--------------in send mod vid ----------------") info = InfoMessage(f.ctrl_tag) l,dpid,n = dp_tup um = UpdateMessageByFlow(f.flow_id,f.up_type,f.up_step) um.version_tag = 2 self.logger.info(um.up_step) send_ctrl = self.dp_to_local[dpid] if(ifr): um.if_reverse = True um.to_del.append((n,dpid,l)) self.logger.info(um.to_add) f.ctrl_tag_reverse = send_ctrl else: um.to_del.append(dp_tup) f.ctrl_tag = send_ctrl info.ums.append(um) self.send_to_local(send_ctrl,info) if(send_ctrl not in f.ctrl_wait): f.ctrl_wait.append(send_ctrl) def deepcp_to_deal(self,a): result = [] for item in a: result.append(item) return result def tag0(self,flows={}): aggre_dict = {} for f_id, f in flows.items(): f.up_type = consts.TAG f.up_step = 0 to_add,to_del = tools.diff_old_new(f.old,f.new) raw_to_add,raw_to_del = self.deepcp_to_deal(to_add),self.deepcp_to_deal(to_del) aggre_dict = tools.flowkey_to_ctrlkey(aggre_dict,self.dp_to_local,f_id,to_add,to_del) left,right = None,None #the points who pop packets' vid try: right = to_add[-1] to_add.pop(-1) except: self.logger.info("right wrong") try: left = to_add[0] to_add.pop(0) except: self.logger.info("left wrong") self.tag_flows_temp.update({f_id:{'to_add':to_add,'to_del':to_del, 'left':left,'right':right, 'raw_to_add':raw_to_add,'raw_to_del':raw_to_del}}) self.logger.info(" tag0") self.logger.info(self.tag_flows_temp) self.make_and_send_info(aggre_dict,False) for f_id, f in flows.items(): self.started_update_to_flows_new(f) def tag1_pop_add(self,f): self.logger.info(" in tag1") f.up_step = consts.TAG_POP_ADD up_infos = self.tag_flows_temp[f.flow_id] if(up_infos['left']): self.send_mod_packet_vid_cmd(f,up_infos['left'],False) if(up_infos['right']): self.send_mod_packet_vid_cmd(f,up_infos['right'],True) def tag2_add_flows(self,f): self.logger.info(" in tag 2") aggre_dict = {} f.up_step = consts.TAG_NEW_TAG to_add = self.tag_flows_temp[f.flow_id]['to_add'] if(len(to_add) == 0): self.tag3_push_new(f) return aggre_dict = tools.flowkey_to_ctrlkey(aggre_dict,self.dp_to_local,f.flow_id,to_add,[]) self.make_and_send_info(aggre_dict,False) def tag3_push_new(self,f): self.logger.info(" in tag 3") self.logger.info("!!!!!!!!!!!!!!!!!!in tag push vid for packets") f.up_step = consts.TAG_PUSH_NEW up_infos = self.tag_flows_temp[f.flow_id] if(not up_infos['right'] and not up_infos['left']): self.logger.info("no new") self.tag4_del_old(f) return try: 
            self.send_mod_packet_vid_cmd(f, up_infos['left'], False)
        except:
            self.logger.info("no left")
        try:
            self.send_mod_packet_vid_cmd(f, up_infos['right'], True)
        except:
            self.logger.info("no right")

    def tag4_del_old(self, f):
        self.logger.info(" in tag 4")
        f.up_step = consts.TAG_DEL_OLD
        aggre_dict = {}
        # to_del = self.tag_flows_temp[f.flow_id]['to_del']
        to_del = self.tag_flows_temp[f.flow_id]['raw_to_del']
        if(len(to_del) == 0):
            self.tag5_mod_new(f)
            return
        aggre_dict = tools.flowkey_to_ctrlkey(aggre_dict, self.dp_to_local, f.flow_id, [], to_del)
        self.make_and_send_info(aggre_dict, True)

    def tag5_mod_new(self, f):
        aggre_dict = {}
        f.up_step = consts.TAG_MOD_NEW
        self.logger.info("in tag 5")
        self.logger.info(self.flows_new)
        # to_add = self.flows_new[f.flow_id]['to_add']
        to_add = self.tag_flows_temp[f.flow_id]['raw_to_add']
        if(len(to_add) == 0):
            self.tag6_push_del(f)
            return
        aggre_dict = tools.flowkey_to_ctrlkey(aggre_dict, self.dp_to_local, f.flow_id, to_add, [])
        self.make_and_send_info(aggre_dict, False)

    def tag6_push_del(self, f):
        self.logger.info(" in tag 6")
        f.up_step = consts.TAG_PUSH_DEL
        # self.logger.info("!!!!!!!!!!!!!!!!!!in tag 6")
        self.logger.info("tag_flows_temp")
        self.logger.info(self.tag_flows_temp)
        up_infos = self.tag_flows_temp[f.flow_id]
        if(not up_infos['right'] and not up_infos['left']):
            self.logger.info("no new")
            self.tag7_pop_del(f)
            return
        try:
            self.del_vid_flows_cmd(f, up_infos['left'], False)
        except:
            self.logger.info("no left")
        try:
            self.del_vid_flows_cmd(f, up_infos['right'], True)
        except:
            self.logger.info("no right")

    def tag7_pop_del(self, f):
        self.logger.info(" in tag 7")
        f.up_step = consts.TAG_POP_DEL
        aggre_dict = {}
        # to_del = self.flows_new[f.flow_id]['to_add']
        # self.logger.info("!!!!!!!!!!!!!!!!!!in tag 7")
        self.logger.info("tag_flows_temp")
        self.logger.info(self.tag_flows_temp)
        to_del = self.tag_flows_temp[f.flow_id]['raw_to_add']
        if(len(to_del) == 0):
            self.logger.info("update over by tag")
            # self.finished_update_to_flows(f)
            # self.tag_flows_temp = {}
            return
        aggre_dict = tools.flowkey_to_ctrlkey(aggre_dict, self.dp_to_local, f.flow_id, [], to_del)
        self.logger.info("let's see tag step 7's bug")
        self.logger.info(aggre_dict)
        self.make_and_send_info(aggre_dict, True)
        # self.finished_update_to_flows(f)
        # self.tag_flows_temp = {}

    def tag_fb_process_new(self, f_id):
        self.logger.info("in tag fb")
        f = self.flows_new[f_id]
        f.ctrl_ok += 1
        self.logger.info(f.ctrl_wait)
        self.logger.info(f.ctrl_ok)
        self.logger.info(f.up_step)
        aggre_dict = {}
        if(len(f.ctrl_wait) == f.ctrl_ok):
            f.ctrl_wait = []
            f.ctrl_ok = 0
            if(f.up_step == 0):
                self.logger.info(f_id + " tag 0 info sent to everyone")
                self.tag1_pop_add(f)
            elif(f.up_step == consts.TAG_POP_ADD):
                self.logger.info(f_id + " tag 1 pop add finished")
                self.tag2_add_flows(f)
            elif(f.up_step == consts.TAG_NEW_TAG):
                self.logger.info(f_id + " tag 2 new tag finished")
                self.tag3_push_new(f)
            elif(f.up_step == consts.TAG_PUSH_NEW):
                self.logger.info(f_id + " tag 3 push new finished")
                self.tag4_del_old(f)
                # # self.logger.info("update over by tag")
                # # self.logger.info(nowTime())
            elif(f.up_step == consts.TAG_DEL_OLD):
                self.logger.info(f_id + " tag 4 del old finished")
                self.tag5_mod_new(f)
            elif(f.up_step == consts.TAG_MOD_NEW):
                self.logger.info(f_id + " tag 5 mod new finished")
                self.tag6_push_del(f)
            elif(f.up_step == consts.TAG_PUSH_DEL):
                self.logger.info(f_id + " tag 6 push del finished")
                self.tag7_pop_del(f)
            elif(f.up_step == consts.TAG_POP_DEL):
                self.logger.info(f_id + " tag 7 pop del finished")
                # self.logger.info("update over by tag")

    # for raw
    def raw_update(self, flows={}):
aggre_dict = {} for f_id,f in flows.items(): f.up_type = consts.RAW f.up_step = consts.RAW_INSTALL to_add, to_del = tools.diff_old_new(f.old,f.new) self.logger.info("let's see raw update bug") self.logger.info(to_add) self.logger.info(to_del) aggre_dict = tools.flowkey_to_ctrlkey(aggre_dict,self.dp_to_local,f_id,to_add,to_del) self.make_and_send_info(aggre_dict,False) self.started_update_to_flows_new(f) def raw_fb_process(self,f_id): f = self.flows_new[f_id] f.ctrl_ok += 1 self.logger.info(f.up_step) if(len(f.ctrl_wait) == f.ctrl_ok): f.ctrl_wait = [] f.ctrl_ok = 0 if(f.up_step == consts.RAW_INSTALL): self.logger.info("up over by raw") self.logger.info(nowTime()) self.finished_update_to_flows(f) #for communicate with local def init_socks(self): for local_id in self.locals: c = socket.socket() host = socket.gethostbyname('127.0.0.1') c.connect((host,6000 + local_id)) self.sockets[local_id] = c def send_to_local(self,local_id,msg): # self.logger.info("-------------in send to local") str_message = pickle.dumps(msg) msg_len = len(str_message) data = struct.pack('L', msg_len) + str_message # self.logger.info(str_message) self.sockets[local_id].sendall(data) def global_conn(self,fd): self.logger.info("--------------a connection-------------------") while True: data = fd.recv(8) if(len(data) == 0): fd.close() return msg_len = struct.unpack('L',data)[0] # self.logger.info("global get the fb len") # self.logger.info(msg_len) more_msg = tools.recv_size(fd,msg_len) # self.logger.info(more_msg) msg = pickle.loads(more_msg) eventlet.spawn(self.process_fd_msg,msg) # print(msg) def run_fd_server(self): server = eventlet.listen(('127.0.0.1', consts.GLOBAL_FB_PORT)) pool = GreenPool(10000) while True: fd, addr = server.accept() #accept returns (conn,address) so fd is
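# A minimal sketch of the exact-read helper that the length-prefixed framing
# above relies on. The project's real implementation is tools.recv_size; this
# assumed equivalent is shown because socket.recv may return fewer bytes than
# requested, so the frame length written by send_to_local must be consumed in
# a loop.
def recv_size(sock, size):
    """Read exactly `size` bytes from `sock`, looping over partial recvs."""
    chunks = []
    remaining = size
    while remaining > 0:
        chunk = sock.recv(remaining)
        if not chunk:  # peer closed the connection mid-message
            raise EOFError("socket closed before the full frame arrived")
        chunks.append(chunk)
        remaining -= len(chunk)
    return b"".join(chunks)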
options={ 'verbose_name': '01 SKPD Asal ATL Setwan', 'proxy': True, 'verbose_name_plural': '01 SKPD Asal ATL Setwan', }, bases=('atl.skpdasalatl',), ), migrations.CreateModel( name='SKPDAsalATLSosial', fields=[ ], options={ 'verbose_name': '09 SKPD Asal ATL Sosial', 'proxy': True, 'verbose_name_plural': '09 SKPD Asal ATL Sosial', }, bases=('atl.skpdasalatl',), ), migrations.CreateModel( name='SKPDAsalATLTebingTinggi', fields=[ ], options={ 'verbose_name': '38 SKPD Asal ATL Tebing Tinggi', 'proxy': True, 'verbose_name_plural': '38 SKPD Asal ATL Tebing Tinggi', }, bases=('atl.skpdasalatl',), ), migrations.CreateModel( name='SKPDTujuanATLAwayan', fields=[ ], options={ 'verbose_name': '34 SKPD Tujuan ATL Awayan', 'proxy': True, 'verbose_name_plural': '34 SKPD Tujuan ATL Awayan', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLBAPPEDA', fields=[ ], options={ 'verbose_name': '21 SKPD Tujuan ATL BAPPEDA', 'proxy': True, 'verbose_name_plural': '21 SKPD Tujuan ATL BAPPEDA', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLBatumandi', fields=[ ], options={ 'verbose_name': '32 SKPD Tujuan ATL Batumandi', 'proxy': True, 'verbose_name_plural': '32 SKPD Tujuan ATL Batumandi', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLBatuPiring', fields=[ ], options={ 'verbose_name': '37 SKPD Tujuan ATL Batu Piring', 'proxy': True, 'verbose_name_plural': '37 SKPD Tujuan ATL Batu Piring', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLBKD', fields=[ ], options={ 'verbose_name': '19 SKPD Tujuan ATL BKD', 'proxy': True, 'verbose_name_plural': '19 SKPD Tujuan ATL BKD', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLBKPPD', fields=[ ], options={ 'verbose_name': '26 SKPD Tujuan ATL BKPPD', 'proxy': True, 'verbose_name_plural': '26 SKPD Tujuan ATL BKPPD', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLBPBD', fields=[ ], options={ 'verbose_name': '39 SKPD Tujuan ATL BPBD', 'proxy': True, 'verbose_name_plural': '39 SKPD Tujuan ATL BPBD', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLBPPD', fields=[ ], options={ 'verbose_name': '48 SKPD Tujuan ATL BPPD', 'proxy': True, 'verbose_name_plural': '48 SKPD Tujuan ATL BPPD', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDinkes', fields=[ ], options={ 'verbose_name': '05 SKPD Tujuan ATL Dinkes', 'proxy': True, 'verbose_name_plural': '05 SKPD Tujuan ATL Dinkes', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDisdik', fields=[ ], options={ 'verbose_name': '07 SKPD Tujuan ATL Disdik', 'proxy': True, 'verbose_name_plural': '07 SKPD Tujuan ATL Disdik', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDishub', fields=[ ], options={ 'verbose_name': '04 SKPD Tujuan ATL Dishub', 'proxy': True, 'verbose_name_plural': '04 SKPD Tujuan ATL Dishub', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDisnakertrans', fields=[ ], options={ 'verbose_name': '41 SKPD Tujuan ATL Disnakertrans', 'proxy': True, 'verbose_name_plural': '41 SKPD Tujuan ATL Disnakertrans', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDistamben', fields=[ ], options={ 'verbose_name': '17 SKPD Tujuan ATL Distamben', 'proxy': True, 'verbose_name_plural': '17 SKPD Tujuan ATL Distamben', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( 
name='SKPDTujuanATLDKO', fields=[ ], options={ 'verbose_name': '23 SKPD Tujuan ATL DKO', 'proxy': True, 'verbose_name_plural': '23 SKPD Tujuan ATL DKO', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDKP', fields=[ ], options={ 'verbose_name': '15 SKPD Tujuan ATL DKP', 'proxy': True, 'verbose_name_plural': '15 SKPD Tujuan ATL DKP', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDKUKMP', fields=[ ], options={ 'verbose_name': '16 SKPD Tujuan ATL DKUKMP', 'proxy': True, 'verbose_name_plural': '16 SKPD Tujuan ATL DKUKMP', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDLH', fields=[ ], options={ 'verbose_name': '22 SKPD Tujuan ATL DLH', 'proxy': True, 'verbose_name_plural': '22 SKPD Tujuan ATL DLH', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDPKP', fields=[ ], options={ 'verbose_name': '40 SKPD Tujuan ATL DPKP', 'proxy': True, 'verbose_name_plural': '40 SKPD Tujuan ATL DPKP', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDPMD', fields=[ ], options={ 'verbose_name': '10 SKPD Tujuan ATL DPMD', 'proxy': True, 'verbose_name_plural': '10 SKPD Tujuan ATL DPMD', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDPMPTSP', fields=[ ], options={ 'verbose_name': '18 SKPD Tujuan ATL DPMPTSP', 'proxy': True, 'verbose_name_plural': '18 SKPD Tujuan ATL DPMPTSP', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDPPKB', fields=[ ], options={ 'verbose_name': '42 SKPD Tujuan ATL DPPKB', 'proxy': True, 'verbose_name_plural': '42 SKPD Tujuan ATL DPPKB', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDPPPA', fields=[ ], options={ 'verbose_name': '11 SKPD Tujuan ATL DPPPA', 'proxy': True, 'verbose_name_plural': '11 SKPD Tujuan ATL DPPPA', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDPUPR', fields=[ ], options={ 'verbose_name': '03 SKPD Tujuan ATL DPUPR', 'proxy': True, 'verbose_name_plural': '03 SKPD Tujuan ATL DPUPR', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLDukCatPil', fields=[ ], options={ 'verbose_name': '12 SKPD Tujuan ATL DukCatPil', 'proxy': True, 'verbose_name_plural': '12 SKPD Tujuan ATL DukCatPil', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLHalong', fields=[ ], options={ 'verbose_name': '35 SKPD Tujuan ATL Halong', 'proxy': True, 'verbose_name_plural': '35 SKPD Tujuan ATL Halong', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLInspektorat', fields=[ ], options={ 'verbose_name': '20 SKPD Tujuan ATL Inspektorat', 'proxy': True, 'verbose_name_plural': '20 SKPD Tujuan ATL Inspektorat', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLJuai', fields=[ ], options={ 'verbose_name': '33 SKPD Tujuan ATL Juai', 'proxy': True, 'verbose_name_plural': '33 SKPD Tujuan ATL Juai', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLKearsipan', fields=[ ], options={ 'verbose_name': '44 SKPD Tujuan ATL Kearsipan', 'proxy': True, 'verbose_name_plural': '44 SKPD Tujuan ATL Kearsipan', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLKehutanan', fields=[ ], options={ 'verbose_name': '14 SKPD Tujuan ATL Kehutanan', 'proxy': True, 'verbose_name_plural': '14 SKPD Tujuan ATL Kehutanan', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( 
name='SKPDTujuanATLKESBANGPOL', fields=[ ], options={ 'verbose_name': '24 SKPD Tujuan ATL KESBANGPOL', 'proxy': True, 'verbose_name_plural': '24 SKPD Tujuan ATL KESBANGPOL', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLKominfo', fields=[ ], options={ 'verbose_name': '43 SKPD Tujuan ATL Kominfo', 'proxy': True, 'verbose_name_plural': '43 SKPD Tujuan ATL Kominfo', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLLampihong', fields=[ ], options={ 'verbose_name': '31 SKPD Tujuan ATL Lampihong', 'proxy': True, 'verbose_name_plural': '31 SKPD Tujuan ATL Lampihong', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLParingin', fields=[ ], options={ 'verbose_name': '28 SKPD Tujuan ATL Paringin', 'proxy': True, 'verbose_name_plural': '28 SKPD Tujuan ATL Paringin', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLParinginKota', fields=[ ], options={ 'verbose_name': '29 SKPD Tujuan ATL Paringin Kota', 'proxy': True, 'verbose_name_plural': '29 SKPD Tujuan ATL Paringin Kota', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLParinginSelatan', fields=[ ], options={ 'verbose_name': '36 SKPD Tujuan ATL Paringin Selatan', 'proxy': True, 'verbose_name_plural': '36 SKPD Tujuan ATL Paringin Selatan', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLParinginTimur', fields=[ ], options={ 'verbose_name': '30 SKPD Tujuan ATL Paringin Timur', 'proxy': True, 'verbose_name_plural': '30 SKPD Tujuan ATL Paringin Timur', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLPariwisata', fields=[ ], options={ 'verbose_name': '46 SKPD Tujuan ATL Pariwisata', 'proxy': True, 'verbose_name_plural': '46 SKPD Tujuan ATL Pariwisata', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLPerdagangan', fields=[ ], options={ 'verbose_name': '47 SKPD Tujuan ATL Perdagangan', 'proxy': True, 'verbose_name_plural': '47 SKPD Tujuan ATL Perdagangan', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLPerikanan', fields=[ ], options={ 'verbose_name': '45 SKPD Tujuan ATL Perikanan', 'proxy': True, 'verbose_name_plural': '45 SKPD Tujuan ATL Perikanan', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLPerpustakaan', fields=[ ], options={ 'verbose_name': '08 SKPD Tujuan ATL Perpustakaan', 'proxy': True, 'verbose_name_plural': '08 SKPD Tujuan ATL Perpustakaan', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLPertanian', fields=[ ], options={ 'verbose_name': '13 SKPD Tujuan ATL Pertanian', 'proxy': True, 'verbose_name_plural': '13 SKPD Tujuan ATL Pertanian', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLRSUD', fields=[ ], options={ 'verbose_name': '06 SKPD Tujuan ATL RSUD', 'proxy': True, 'verbose_name_plural': '06 SKPD Tujuan ATL RSUD', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLSATPOLPP', fields=[ ], options={ 'verbose_name': '25 SKPD Tujuan ATL SATPOLPP', 'proxy': True, 'verbose_name_plural': '25 SKPD Tujuan ATL SATPOLPP', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLSekretariatKorpri', fields=[ ], options={ 'verbose_name': '27 SKPD Tujuan ATL Sekretariat Korpri', 'proxy': True, 'verbose_name_plural': '27 SKPD Tujuan ATL Sekretariat Korpri', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( 
name='SKPDTujuanATLSetda', fields=[ ], options={ 'verbose_name': '02 SKPD Tujuan ATL Setda', 'proxy': True, 'verbose_name_plural': '02 SKPD Tujuan ATL Setda', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLSetwan', fields=[ ], options={ 'verbose_name': '01 SKPD Tujuan ATL Setwan', 'proxy': True, 'verbose_name_plural': '01 SKPD Tujuan ATL Setwan', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLSosial', fields=[ ], options={ 'verbose_name': '09 SKPD Tujuan ATL Sosial', 'proxy': True, 'verbose_name_plural': '09 SKPD Tujuan ATL Sosial', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='SKPDTujuanATLTebingTinggi', fields=[ ], options={ 'verbose_name': '38 SKPD Tujuan ATL Tebing Tinggi', 'proxy': True, 'verbose_name_plural': '38 SKPD Tujuan ATL Tebing Tinggi', }, bases=('atl.skpdtujuanatl',), ), migrations.CreateModel( name='TahunBerkurangATLAwayan', fields=[ ], options={ 'verbose_name': '34 Tahun Berkurang ATL Awayan', 'proxy': True, 'verbose_name_plural': '34 Tahun Berkurang ATL Awayan', }, bases=('atl.tahunberkurangatl',), ), migrations.CreateModel( name='TahunBerkurangATLBAPPEDA', fields=[ ], options={ 'verbose_name': '21 Tahun Berkurang ATL BAPPEDA', 'proxy': True, 'verbose_name_plural': '21 Tahun Berkurang ATL BAPPEDA', }, bases=('atl.tahunberkurangatl',), ), migrations.CreateModel( name='TahunBerkurangATLBatumandi', fields=[ ], options={ 'verbose_name': '32 Tahun Berkurang ATL Batumandi', 'proxy': True, 'verbose_name_plural': '32 Tahun Berkurang ATL Batumandi', }, bases=('atl.tahunberkurangatl',), ), migrations.CreateModel( name='TahunBerkurangATLBatuPiring', fields=[ ], options={ 'verbose_name': '37 Tahun Berkurang ATL Batu Piring', 'proxy': True, 'verbose_name_plural': '37 Tahun Berkurang ATL Batu Piring', }, bases=('atl.tahunberkurangatl',), ), migrations.CreateModel( name='TahunBerkurangATLBKD', fields=[ ], options={ 'verbose_name': '19 Tahun Berkurang ATL BKD', 'proxy': True, 'verbose_name_plural': '19 Tahun Berkurang ATL BKD', }, bases=('atl.tahunberkurangatl',), ), migrations.CreateModel( name='TahunBerkurangATLBKPPD', fields=[ ], options={ 'verbose_name': '26 Tahun Berkurang ATL BKPPD', 'proxy': True, 'verbose_name_plural': '26 Tahun Berkurang ATL BKPPD', }, bases=('atl.tahunberkurangatl',), ), migrations.CreateModel( name='TahunBerkurangATLBPBD', fields=[ ], options={ 'verbose_name': '39 Tahun Berkurang ATL BPBD', 'proxy': True, 'verbose_name_plural':
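# A minimal sketch of the models.py pattern these CreateModel entries mirror:
# each proxy model (note the empty `fields` lists and 'proxy': True above)
# adds no columns, only alternate admin labels over the same table, so many
# per-SKPD admin views share one concrete model. The import path below is an
# assumption for illustration.
from atl.models import SKPDTujuanATL


class SKPDTujuanATLSetwan(SKPDTujuanATL):
    class Meta:
        proxy = True  # reuse SKPDTujuanATL's table; no schema change
        verbose_name = '01 SKPD Tujuan ATL Setwan'
        verbose_name_plural = '01 SKPD Tujuan ATL Setwan'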
+ m.b263 - m.b298 <= 0)
m.c4515 = Constraint(expr= - m.b262 + m.b264 - m.b299 <= 0)
m.c4516 = Constraint(expr= - m.b263 + m.b264 - m.b300 <= 0)
m.c4517 = Constraint(expr= - m.b265 + m.b266 - m.b273 <= 0)
m.c4518 = Constraint(expr= - m.b265 + m.b267 - m.b274 <= 0)
m.c4519 = Constraint(expr= - m.b265 + m.b268 - m.b275 <= 0)
m.c4520 = Constraint(expr= - m.b265 + m.b269 - m.b276 <= 0)
m.c4521 = Constraint(expr= - m.b265 + m.b270 - m.b277 <= 0)
m.c4522 = Constraint(expr= - m.b265 + m.b271 - m.b278 <= 0)
m.c4523 = Constraint(expr= - m.b265 + m.b272 - m.b279 <= 0)
m.c4524 = Constraint(expr= - m.b266 + m.b267 - m.b280 <= 0)
m.c4525 = Constraint(expr= - m.b266 + m.b268 - m.b281 <= 0)
m.c4526 = Constraint(expr= - m.b266 + m.b269 - m.b282 <= 0)
m.c4527 = Constraint(expr= - m.b266 + m.b270 - m.b283 <= 0)
m.c4528 = Constraint(expr= - m.b266 + m.b271 - m.b284 <= 0)
m.c4529 = Constraint(expr= - m.b266 + m.b272 - m.b285 <= 0)
m.c4530 = Constraint(expr= - m.b267 + m.b268 - m.b286 <= 0)
m.c4531 = Constraint(expr= - m.b267 + m.b269 - m.b287 <= 0)
m.c4532 = Constraint(expr= - m.b267 + m.b270 - m.b288 <= 0)
m.c4533 = Constraint(expr= - m.b267 + m.b271 - m.b289 <= 0)
m.c4534 = Constraint(expr= - m.b267 + m.b272 - m.b290 <= 0)
m.c4535 = Constraint(expr= - m.b268 + m.b269 - m.b291 <= 0)
m.c4536 = Constraint(expr= - m.b268 + m.b270 - m.b292 <= 0)
m.c4537 = Constraint(expr= - m.b268 + m.b271 - m.b293 <= 0)
m.c4538 = Constraint(expr= - m.b268 + m.b272 - m.b294 <= 0)
m.c4539 = Constraint(expr= - m.b269 + m.b270 - m.b295 <= 0)
m.c4540 = Constraint(expr= - m.b269 + m.b271 - m.b296 <= 0)
m.c4541 = Constraint(expr= - m.b269 + m.b272 - m.b297 <= 0)
m.c4542 = Constraint(expr= - m.b270 + m.b271 - m.b298 <= 0)
m.c4543 = Constraint(expr= - m.b270 + m.b272 - m.b299 <= 0)
m.c4544 = Constraint(expr= - m.b271 + m.b272 - m.b300 <= 0)
m.c4545 = Constraint(expr= - m.b273 + m.b274 - m.b280 <= 0)
m.c4546 = Constraint(expr= - m.b273 + m.b275 - m.b281 <= 0)
m.c4547 = Constraint(expr= - m.b273 + m.b276 - m.b282 <= 0)
m.c4548 = Constraint(expr= - m.b273 + m.b277 - m.b283 <= 0)
m.c4549 = Constraint(expr= - m.b273 + m.b278 - m.b284 <= 0)
m.c4550 = Constraint(expr= - m.b273 + m.b279 - m.b285 <= 0)
m.c4551 = Constraint(expr= - m.b274 + m.b275 - m.b286 <= 0)
m.c4552 = Constraint(expr= - m.b274 + m.b276 - m.b287 <= 0)
m.c4553 = Constraint(expr= - m.b274 + m.b277 - m.b288 <= 0)
m.c4554 = Constraint(expr= - m.b274 + m.b278 - m.b289 <= 0)
m.c4555 = Constraint(expr= - m.b274 + m.b279 - m.b290 <= 0)
m.c4556 = Constraint(expr= - m.b275 + m.b276 - m.b291 <= 0)
m.c4557 = Constraint(expr= - m.b275 + m.b277 - m.b292 <= 0)
m.c4558 = Constraint(expr= - m.b275 + m.b278 - m.b293 <= 0)
m.c4559 = Constraint(expr= - m.b275 + m.b279 - m.b294 <= 0)
m.c4560 = Constraint(expr= - m.b276 + m.b277 - m.b295 <= 0)
m.c4561 = Constraint(expr= - m.b276 + m.b278 - m.b296 <= 0)
m.c4562 = Constraint(expr= - m.b276 + m.b279 - m.b297 <= 0)
m.c4563 = Constraint(expr= - m.b277 + m.b278 - m.b298 <= 0)
m.c4564 = Constraint(expr= - m.b277 + m.b279 - m.b299 <= 0)
m.c4565 = Constraint(expr= - m.b278 + m.b279 - m.b300 <= 0)
m.c4566 = Constraint(expr= - m.b280 + m.b281 - m.b286 <= 0)
m.c4567 = Constraint(expr= - m.b280 + m.b282 - m.b287 <= 0)
m.c4568 = Constraint(expr= - m.b280 + m.b283 - m.b288 <= 0)
m.c4569 = Constraint(expr= - m.b280 + m.b284 - m.b289 <= 0)
m.c4570 = Constraint(expr= - m.b280 + m.b285 - m.b290 <= 0)
m.c4571 = Constraint(expr= - m.b281 + m.b282 - m.b291 <= 0)
m.c4572 = Constraint(expr= - m.b281 + m.b283 - m.b292 <= 0)
m.c4573 = Constraint(expr= - m.b281 + m.b284 - m.b293 <= 0)
m.c4574 = Constraint(expr= - m.b281 + m.b285 - m.b294 <= 0)
m.c4575 = Constraint(expr= - m.b282 + m.b283 - m.b295 <= 0)
m.c4576 = Constraint(expr= - m.b282 + m.b284 - m.b296 <= 0)
m.c4577 = Constraint(expr= - m.b282 + m.b285 - m.b297 <= 0)
m.c4578 = Constraint(expr= - m.b283 + m.b284 - m.b298 <= 0)
m.c4579 = Constraint(expr= - m.b283 + m.b285 - m.b299 <= 0)
m.c4580 = Constraint(expr= - m.b284 + m.b285 - m.b300 <= 0)
m.c4581 = Constraint(expr= - m.b286 + m.b287 - m.b291 <= 0)
m.c4582 = Constraint(expr= - m.b286 + m.b288 - m.b292 <= 0)
m.c4583 = Constraint(expr= - m.b286 + m.b289 - m.b293 <= 0)
m.c4584 = Constraint(expr= - m.b286 + m.b290 - m.b294 <= 0)
m.c4585 = Constraint(expr= - m.b287 + m.b288 - m.b295 <= 0)
m.c4586 = Constraint(expr= - m.b287 + m.b289 - m.b296 <= 0)
m.c4587 = Constraint(expr= - m.b287 + m.b290 - m.b297 <= 0)
m.c4588 = Constraint(expr= - m.b288 + m.b289 - m.b298 <= 0)
m.c4589 = Constraint(expr= - m.b288 + m.b290 - m.b299 <= 0)
m.c4590 = Constraint(expr= - m.b289 + m.b290 - m.b300 <= 0)
m.c4591 = Constraint(expr= - m.b291 + m.b292 - m.b295 <= 0)
m.c4592 = Constraint(expr= - m.b291 + m.b293 - m.b296 <= 0)
m.c4593 = Constraint(expr= - m.b291 + m.b294 - m.b297 <= 0)
m.c4594 = Constraint(expr= - m.b292 + m.b293 - m.b298 <= 0)
m.c4595 = Constraint(expr= - m.b292 + m.b294 - m.b299 <= 0)
m.c4596 = Constraint(expr= - m.b293 + m.b294 - m.b300 <= 0)
m.c4597 = Constraint(expr= - m.b295 + m.b296 - m.b298 <= 0)
m.c4598 = Constraint(expr= - m.b295 + m.b297 - m.b299 <= 0)
m.c4599 = Constraint(expr= - m.b296 + m.b297 - m.b300 <= 0)
m.c4600 = Constraint(expr= - m.b298 + m.b299 - m.b300 <= 0)
m.c4601 = Constraint(expr=8*m.b1*m.b2 - 2*m.b1 - 15*m.b2 + 20*m.b1*m.b4 + 13*m.b4 + 4*m.b1*m.b7 - 52*m.b7
    + 4*m.b1*m.b8 - 78*m.b8 + 2*m.b1*m.b9 - 64*m.b9 + 10*m.b1*m.b10 - 54*m.b10 + 2*m.b1*m.b16 - 77*m.b16
    + 12*m.b1*m.b17 - 76*m.b17 + 2*m.b1*m.b18 - 100*m.b18 + 4*m.b1*m.b20 - 104*m.b20 + 4*m.b1*m.b21 - 93*m.b21
    + 10*m.b1*m.b22 - 110*m.b22 + 2*m.b1*m.b23 - 96*m.b23 + 20*m.b1*m.b24 - 126*m.b24 - 4*m.b1*m.b25 - 9*m.b25
    - 20*m.b1*m.b28 - 60*m.b28 - 10*m.b1*m.b29 - 52*m.b29 - 10*m.b1*m.b31 - 64*m.b31 - 4*m.b1*m.b32 - 50*m.b32
    - 4*m.b1*m.b35 - 28*m.b35 - 10*m.b1*m.b37 - 37*m.b37 - 6*m.b1*m.b38 - 62*m.b38 - 2*m.b1*m.b40 - 54*m.b40
    - 20*m.b1*m.b41 - 70*m.b41 - 4*m.b1*m.b43 - 72*m.b43 - 2*m.b1*m.b44 - 63*m.b44 - 2*m.b1*m.b45 - 82*m.b45
    - 2*m.b1*m.b46 - 76*m.b46 + 6*m.b2*m.b3 - 6*m.b3 + 8*m.b2*m.b4 + 10*m.b2*m.b5 - 58*m.b5 + 10*m.b2*m.b6 - 60*m.b6
    + 10*m.b2*m.b7 + 2*m.b2*m.b8 + 8*m.b2*m.b9 + 8*m.b2*m.b11 - 46*m.b11 + 8*m.b2*m.b13 - 69*m.b13
    + 6*m.b2*m.b15 - 84*m.b15 + 4*m.b2*m.b16 + 10*m.b2*m.b17 + 10*m.b2*m.b18 + 4*m.b2*m.b19 - 103*m.b19
    + 6*m.b2*m.b22 + 2*m.b2*m.b23 + 6*m.b2*m.b25 - 20*m.b2*m.b50 - 45*m.b50 - 10*m.b2*m.b51 - 47*m.b51
    - 10*m.b2*m.b53 - 71*m.b53 - 4*m.b2*m.b54 - 57*m.b54 - 4*m.b2*m.b57 - 41*m.b57 - 10*m.b2*m.b59 - 58*m.b59
    - 6*m.b2*m.b60 - 83*m.b60 - 2*m.b2*m.b62 - 71*m.b62 - 20*m.b2*m.b63 - 95*m.b63 - 4*m.b2*m.b65 - 107*m.b65
    - 2*m.b2*m.b66 - 94*m.b66 - 2*m.b2*m.b67 - 103*m.b67 - 2*m.b2*m.b68 - 101*m.b68 + 4*m.b3*m.b6 + 4*m.b3*m.b7
    + 12*m.b3*m.b9 + 4*m.b3*m.b10 + 10*m.b3*m.b11 + 4*m.b3*m.b12 - 34*m.b12 + 10*m.b3*m.b13 + 2*m.b3*m.b14 - 53*m.b14
    + 2*m.b3*m.b15 + 2*m.b3*m.b16 + 4*m.b3*m.b17 + 4*m.b3*m.b18 + 8*m.b3*m.b19 + 4*m.b3*m.b20 + 4*m.b3*m.b22
    + 4*m.b3*m.b23 + 10*m.b3*m.b24 + 6*m.b3*m.b26 - 8*m.b26 + 4*m.b3*m.b48 + m.b48 - 20*m.b3*m.b71 - 28*m.b71
    - 10*m.b3*m.b72 - 20*m.b72 - 10*m.b3*m.b74 - 40*m.b74 - 4*m.b3*m.b75 - 18*m.b75 - 4*m.b3*m.b78 - 20*m.b78
    - 10*m.b3*m.b80 - 43*m.b80 - 6*m.b3*m.b81 - 64*m.b81 - 2*m.b3*m.b83 - 42*m.b83 - 20*m.b3*m.b84 - 60*m.b84
    - 4*m.b3*m.b86 - 80*m.b86 - 2*m.b3*m.b87 - 71*m.b87 - 2*m.b3*m.b88 - 74*m.b88 - 2*m.b3*m.b89 - 74*m.b89
    + 4*m.b4*m.b5 + 4*m.b4*m.b14 + 4*m.b4*m.b17 + 10*m.b4*m.b19 + 4*m.b4*m.b21 + 2*m.b4*m.b22 + 4*m.b4*m.b24
    + 6*m.b4*m.b27 + 11*m.b27 + 4*m.b4*m.b49 + 34*m.b49 - 20*m.b4*m.b91 - 69*m.b91 - 10*m.b4*m.b92 - 61*m.b92
    - 10*m.b4*m.b94 - 77*m.b94 - 4*m.b4*m.b95 - 43*m.b95 - 4*m.b4*m.b98 - 27*m.b98 - 10*m.b4*m.b100 - 38*m.b100
    - 6*m.b4*m.b101 - 61*m.b101 - 2*m.b4*m.b103 - 33*m.b103 - 20*m.b4*m.b104 - 51*m.b104 - 4*m.b4*m.b106 - 69*m.b106
    - 2*m.b4*m.b107 - 60*m.b107 - 2*m.b4*m.b108 - 63*m.b108 - 2*m.b4*m.b109 - 61*m.b109 + 20*m.b5*m.b6 +
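# [Illustrative sketch, not part of the generated model above] Constraints
# m.c4517 through m.c4600 follow one fixed pattern, -b_i + b_j - b_k <= 0,
# where b_k is the variable attached to the unordered pair (i, j) of a block
# of eight binaries: b265..b272 pair up with b273..b300 in combination order.
# A hand-written Pyomo model would generate such cuts in a loop; the index
# ranges below are taken from that block, everything else is assumed:
from itertools import combinations
from pyomo.environ import Binary, ConcreteModel, ConstraintList, Var

demo = ConcreteModel()
demo.b = Var(range(265, 273), domain=Binary)   # the block of 8 binaries
demo.p = Var(range(273, 301), domain=Binary)   # one pair variable per (i, j)
demo.cuts = ConstraintList()
for k, (i, j) in enumerate(combinations(range(265, 273), 2), start=273):
    demo.cuts.add(-demo.b[i] + demo.b[j] - demo.p[k] <= 0)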
params["R26"]
    # Series resistance plus one parallel-RC (Voigt) term per time constant;
    # equivalent to unpacking R1..R64 individually and summing 64 explicit terms.
    return Rs + sum(params[f"R{k}"] / (1 + w * 1j * t_values[k - 1]) for k in range(1, 65))


def KK_RC65_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC-
    <NAME> (<EMAIL> / <EMAIL>)
    """
    Rs = params["Rs"]
    # Series resistance plus one parallel-RC (Voigt) term per time constant.
    return Rs + sum(params[f"R{k}"] / (1 + w * 1j * t_values[k - 1]) for k in range(1, 66))
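# [Illustrative usage sketch, not from the original module] Evaluating one of
# the RC-ladder functions above over an angular-frequency grid; the parameter
# values and time constants here are invented for the example:
import numpy as np

example_params = {"Rs": 10.0, **{f"R{k}": 0.5 for k in range(1, 66)}}
example_taus = np.logspace(-6, 2, num=65)          # assumed time constants [s]
example_w = 2 * np.pi * np.logspace(-1, 5, num=20)  # angular frequencies [rad/s]
Z = KK_RC65_fit(example_params, example_w, example_taus)  # complex impedance, shape (20,)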
"", "reddit": "", "slack": "", "telegram": "", "twitter": "", "youtube": "" } }, "eGAS": { "symbol": "eGAS", "address": "0xb53A96bcBdD9CF78dfF20BAB6C2be7bAec8f00f8", "decimals": 8, "name": "<NAME>", "ens_address": "", "website": "http://www.ethgas.stream", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "<EMAIL>", "url": "http://www.ethgas.stream" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "", "twitter": "https://twitter.com/eth_gas", "youtube": "" } }, "LYM": { "symbol": "LYM", "name": "Lympo", "type": "ERC20", "address": "0x57aD67aCf9bF015E4820Fbd66EA1A21BED8852eC", "ens_address": "", "decimals": 18, "website": "https://lympo.io", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "" }, "social": { "blog": "https://medium.com/@lympo.io", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "https://www.reddit.com/r/LYMPO", "slack": "", "telegram": "", "twitter": "https://twitter.com/Lympo_io", "youtube": "" } }, "BANX": { "symbol": "BANX", "address": "0xF87F0D9153fea549c728Ad61cb801595a68b73de", "decimals": 18, "name": "BANX", "ens_address": "", "website": "https://pre.ubanx.io", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "<EMAIL>", "url": "" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "", "twitter": "", "youtube": "" } }, "eUSD": { "symbol": "eUSD", "address": "0x523630976eB6147621B5c31c781eBe2Ec2a806E0", "decimals": 18, "name": "Ether-Backed USD Nomins (erc20)", "ens_address": "", "website": "https://havven.io", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "<EMAIL>", "url": "" }, "social": { "blog": "https://blog.havven.io", "chat": "https://t.me/havven_official1", "facebook": "https://www.facebook.com/havven.io/", "forum": "https://bitcointalk.org/index.php?topic=2702470", "github": "https://github.com/havven/havven", "gitter": "", "instagram": "", "linkedin": "https://www.linkedin.com/company/havven/", "reddit": "https://www.reddit.com/r/havven/", "slack": "", "telegram": "https://t.me/havven_official1", "twitter": "https://twitter.com/havven_io", "youtube": "https://www.youtube.com/channel/UCGrPolyOAj3cODJIN5ssVTw" } }, "FLMC": { "symbol": "FLMC", "address": "0x5976F7dac1525eF3277836043bA474a35E6B4272", "decimals": 0, "name": "Filmscoin", "ens_address": "", "website": "https://filmscoin.io", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "", "twitter": "", "youtube": "" } }, "BTHR": { "symbol": "BTHR", "address": "0xa02e3bB9cEbc03952601B3724B4940e0845BeBcf", "decimals": 18, "name": "Bethereum", "ens_address": "", "website": "https://www.bethereum.com/", "logo": { "src": "https://image.ibb.co/jPRLCx/icon_default_1.png", "width": 250, "height": 250, "ipfs_hash": "" }, "support": { "email": "<EMAIL>", "url": "" }, "social": { "blog": "https://medium.com/bethereum", "chat": "", "facebook": "https://www.facebook.com/bethereumproject/", "forum": 
"https://bitcointalk.org/index.php?topic=2898723.0", "github": "https://github.com/bethereumproject", "gitter": "", "instagram": "https://www.instagram.com/bethereum/", "linkedin": "https://www.linkedin.com/company/bethereum/", "reddit": "https://www.reddit.com/r/bethereum", "slack": "", "telegram": "https://t.me/bethereum", "twitter": "https://twitter.com/bethereumteam", "youtube": "https://www.youtube.com/channel/UCECoUw0v3gsAFULCVD7YSmA" } }, "CTL": { "symbol": "CTL", "address": "0xBf4cFD7d1eDeeEA5f6600827411B41A21eB08abd", "decimals": 2, "name": "CTL", "ens_address": "", "website": "", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "", "twitter": "", "youtube": "" } }, "POP": { "symbol": "POP", "name": "POP Network Token", "type": "ERC20", "address": "0x5D858bcd53E085920620549214a8b27CE2f04670", "ens_address": "", "decimals": 18, "website": "https://www.thepopnetwork.org", "logo": { "src": "https://i.imgur.com/KEVDyw4.png", "width": "200", "height": "200", "ipfs_hash": "" }, "support": { "email": "<EMAIL>", "url": "https://www.thepopnetwork.org" }, "social": { "blog": "https://blog.popchest.com", "chat": "", "facebook": "https://facebook.com/PopChest/", "forum": "https://bitcointalk.org/index.php?topic=5061712.0", "github": "https://github.com/popchest", "gitter": "", "instagram": "https://www.instagram.com/pop_chest/", "linkedin": "https://www.linkedin.com/company/popchest/", "reddit": "", "slack": "", "telegram": "https://t.me/popchest", "twitter": "https://twitter.com/pop_chest", "youtube": "https://www.youtube.com/channel/UCcxFBdBCOzBiHt5g3iPYtbw" } }, "TGT": { "symbol": "TGT", "name": "Target Coin", "type": "ERC20", "address": "0xAc3Da587eac229C9896D919aBC235CA4Fd7f72c1", "ens_address": "", "decimals": 1, "website": "https://www.tgtcoins.com", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "", "twitter": "https://twitter.com/TargetCoin", "youtube": "" } }, "SVD": { "symbol": "SVD", "name": "savedroid", "type": "ERC20", "address": "0xbdEB4b83251Fb146687fa19D1C660F99411eefe3", "ens_address": "", "decimals": 18, "website": "https://ico.savedroid.com", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "" }, "social": { "blog": "https://medium.com/@ico_8796", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "https://www.reddit.com/r/savedroid_ico", "slack": "", "telegram": "", "twitter": "https://twitter.com/savedroidAG", "youtube": "" } }, "USDC": { "symbol": "USDC", "address": "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48", "decimals": 6, "name": "USD//Coin", "ens_address": "", "website": "https://www.centre.io", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "https://www.centre.io" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "https://github.com/centrehq/centre-tokens", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "", "twitter": "", "youtube": "" } }, "CAS": { "symbol": "CAS", "name": "Cashaa", "address": 
"0xe8780B48bdb05F928697A5e8155f672ED91462F7", "decimals": 18, "website": "https://cashaa.com", "logo": { "src": "https://cashaa.com/img/tkn-icon1.png", "width": 20, "height": 20, "ipfs_hash": "" }, "support": { "email": "<EMAIL>", "url": "https://cashaa.com" }, "social": { "blog": "", "chat": "", "facebook": "https://www.facebook.com/cashaaLtd", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "https://t.me/CashaaLtd", "twitter": "https://twitter.com/CashaaLTD", "youtube": "https://www.youtube.com/channel/UCwRJjX6dNz49j3Pc0ROZJbg" } }, "ZLA": { "symbol": "ZLA", "name": "Zilla", "type": "ERC20", "address": "0xfd8971d5E8E1740cE2d0A84095fCA4De729d0c16", "ens_address": "", "decimals": 18, "website": "https://zla.io", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "https://www.reddit.com/r/ZILLAtoken", "slack": "", "telegram": "", "twitter": "https://twitter.com/zillatoken", "youtube": "" } }, "SNT": { "symbol": "SNT", "address": "0x744d70FDBE2Ba4CF95131626614a1763DF805B9E", "decimals": 18, "name": "Status Network Token", "ens_address": "", "website": "https://status.im", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "", "twitter": "", "youtube": "" } }, "OLE": { "symbol": "OLE", "name": "Olive", "type": "ERC20", "address": "0x9d9223436dDD466FC247e9dbbD20207e640fEf58", "ens_address": "", "decimals": 18, "website": "http://www.olivecoin.co", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "", "twitter": "https://twitter.com/OliveCoinOLE", "youtube": "" } }, "NAC": { "symbol": "NAC", "address": "0x8d80de8A78198396329dfA769aD54d24bF90E7aa", "decimals": 18, "name": "<NAME>", "ens_address": "", "website": "https://nami.trade", "logo": { "src": "https://drive.google.com/file/d/1D8Oh0j1l_Q7MFbqyGmNFHNE2pw-lyXmw/view", "width": "32", "height": "32", "ipfs_hash": "" }, "support": { "email": "<EMAIL>", "url": "" }, "social": { "blog": "nami.io/newsroom", "chat": "", "facebook": "https://www.facebook.com/NAMI.TRADE.OFFICIAL/", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "https://t.me/namitrade", "twitter": "https://twitter.com/namidotio", "youtube": "https://www.youtube.com/channel/UCYAqEagemhtu0MOtnE7rNJQ" } }, "EVN": { "symbol": "EVN", "address": "0xd780Ae2Bf04cD96E577D3D014762f831d97129d0", "decimals": 18, "name": "<NAME>", "ens_address": "", "website": "https://envion.org", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "<EMAIL>", "url": "" }, "social": { "blog": "https://medium.com/@envion", "chat": "https://bitcointalk.org/index.php?topic=2348435", "facebook": "https://www.facebook.com/envion.org", "forum": "", "github": "https://github.com/envion/Smart-Contracts", "gitter": "", "instagram": "https://www.instagram.com/envion_official", "linkedin": "", "reddit": "", "slack": "", "telegram": 
"https://t.me/Envion", "twitter": "https://twitter.com/Envion_org", "youtube": "" } }, "DROP (dropil)": { "symbol": "DROP (dropil)", "address": "0x4672bAD527107471cB5067a887f4656D585a8A31", "decimals": 18, "name": "Dropil", "ens_address": "", "website": "https://dropil.com", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "https://dex.dropil.com/newticket" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "", "twitter": "", "youtube": "" } }, "MLN (new)": { "symbol": "MLN (new)", "address": "0xec67005c4E498Ec7f55E092bd1d35cbC47C91892", "decimals": 18, "name": "Melonport", "ens_address": "", "website": "https://melonport.com", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "" }, "social": { "blog": "", "chat": "https://chat.melonport.com", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "", "slack": "", "telegram": "", "twitter": "", "youtube": "" } }, "REF": { "symbol": "REF", "name": "RefToken", "type": "ERC20", "address": "0x89303500a7Abfb178B274FD89F2469C264951e1f", "ens_address": "", "decimals": 8, "website": "https://reftoken.io", "logo": { "src": "", "width": "", "height": "", "ipfs_hash": "" }, "support": { "email": "", "url": "" }, "social": { "blog": "", "chat": "", "facebook": "", "forum": "", "github": "", "gitter": "", "instagram": "", "linkedin": "", "reddit": "https://www.reddit.com/r/RefToken", "slack": "", "telegram": "", "twitter": "https://twitter.com/reftoken", "youtube": "" } }, "GAVEL": { "symbol": "GAVEL", "address":
# DelibeRating/DelibeRating/app/views.py
"""
Definition of views.
"""

import operator
import random
from random import shuffle
import datetime

from django.shortcuts import render, redirect
from django.http import HttpRequest, Http404, HttpResponse
from django.template import RequestContext
from django.contrib.auth import authenticate as auth_authenticate
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth import login as auth_login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.db import transaction
from django.db.models import Q
from django.contrib import messages
from django.contrib.auth.hashers import PBKDF2PasswordHasher as hasher
from django.contrib.auth.hashers import make_password
from django.conf import settings
from django.core.cache import cache
from django.core.paginator import Paginator
from django import template

from time import sleep
from yelpapi import YelpAPI
import argparse
from pprint import pprint
import json
import ast  # literal evaluation

# CustomGroup, GroupVote, VoteOption, and the Custom*Form classes come in via
# the star import below.
from app.forms import *

registerT = template.Library()

yelp_api = YelpAPI(settings.API_KEY, timeout_s=3.0)

"""
View helper functions
"""

def get_cached_business(id):
    if cache.get(str(id)):
        print("Using cached results!")
        raw_data = cache.get(str(id))
        data = json.loads(json.dumps(raw_data))
        return data
    else:
        print("Business is not cached!")
        return None

def get_confidence_score(user, business):
    if business['id'] in user.dislikes[:-1].split(','):
        return -1.0
    tastes = ast.literal_eval(user.tastes)
    confidence = 0
    total = 0
    for cat in business['categories']:
        total += 1
        if cat['title'] in tastes:
            confidence += 1
    return float(confidence / total)

def add_confidence_scores(user, businesses):
    confidence_sum = 0.0
    total = 0.0
    confidence_score = 0.0
    try:
        for business in businesses:
            if business:
                business['confidence_score'] = get_confidence_score(user, business)
    except:
        businesses = []
    return businesses

def get_init_states(page, user, businesses, votes):
    try:
        if page == 'vote':
            for business in businesses:
                business['init_up'] = votes[0][business['id']]
                business['init_down'] = votes[1][business['id']]
        else:
            for business in businesses:
                business['init_star'] = business['id'] in user.stars[:-1].split(',')
                business['init_like'] = business['id'] in user.likes[:-1].split(',')
                business['init_dislike'] = business['id'] in user.dislikes[:-1].split(',')
    except:
        businesses = []
    return businesses

def get_yelp_results(query, location, radius, sortby, pricerange, opennow, attributes):
    cache_key = ''.join(i for i in str(query + location + radius + sortby + pricerange + opennow) if i.isalnum())
    if cache.get(cache_key):
        print("Using cached results!")
        print(cache_key)
        raw_data = cache.get(cache_key)
        data = json.loads(json.dumps(raw_data))
    else:
        print("Querying Yelp Fusion API")
        raw_data = yelp_api.search_query(term=query, location=location, radius=radius, limit=48,
                                         sort_by=sortby, price=pricerange, open_now=opennow,
                                         attributes=attributes)
        data = json.loads(json.dumps(raw_data))
        cache.set(cache_key, data, 86400)  # TODO: Use DEFAULT_TIMEOUT
        # Cache businesses
        for b in data['businesses']:
            cache.set(b['id'], b, 2592000)  # TODO: Use DEFAULT_TIMEOUT
    return data

def user_function(user, data, query, location):
    words = []
    tmp_words = {}
    if user:
        try:
            cached_data = cache.get(user.username + '_searches')
            if not cached_data:
                cached_data = {}
        except:
            cached_data = {}
        try:
            cached_data_all = cache.get('all_searches')
            if not cached_data_all:
                cached_data_all = {}
        except:
            cached_data_all = {}
    if user:
        if query in cached_data:
            cached_data[query] += 1
        else:
            cached_data[query] = 1
        if query in cached_data_all:
            cached_data_all[query] += 1
        else:
            cached_data_all[query] = 1
    if user:
        data['businesses'] = add_confidence_scores(user, data['businesses'])
        data['businesses'] = get_init_states('search', user, data['businesses'], None)
        cache.set(user.username + 'location', location, 2592000)  # 30 days
        cache.set(user.username + '_searches', cached_data, 2592000)  # 30 days
        max_search = max(cached_data.items(), key=operator.itemgetter(1))[1]
        for w, v in cached_data.items():
            word = {}
            tmp_words[w] = True
            word['text'] = w.lower()
            word['weight'] = int(12 * (v / max_search))
            word['link'] = 'search/?q=' + w + '&loc=' + location + '&rad=8050&sort=best_match&price=1,2,3,4&open=false'
            words.append(word)
        max_search_all = max(cached_data_all.items(), key=operator.itemgetter(1))[1]
        for w, v in cached_data_all.items():
            if not w in tmp_words:
                word = {}
                word['text'] = w.lower()
                word['weight'] = int(6 * (v / max_search_all))
                word['link'] = 'search/?q=' + w + '&loc=' + location + '&rad=8050&sort=best_match&price=1,2,3,4&open=false'
                words.append(word)
        cache.set('all_searches', cached_data_all, 2592000)  # 30 days
    return data, words

"""
Ajax functions
"""

@login_required
@require_POST
@csrf_exempt
def addopt(request):
    if request.method == 'POST':
        raw_data = request.body.decode('utf-8')
        data = json.loads(raw_data)
        vote_opt = VoteOption()
        group_vote = GroupVote.objects.get(data['vote_name'] + '/' + data['element_id'])
        vote_opt.group_vote_id = data['vote_name']
        vote_opt.opt_id = data['vote_name'] + '/' + data['element_id']
        vote_opt.business_id = data['element_id']
        vote_opt.upvotes = ''
        vote_opt.downvotes = ''
        vote_opt.save()
        response = {'success': True}
        return HttpResponse(json.dumps(response), content_type='application/json')

@login_required
@require_POST
@csrf_exempt
def dislike(request):
    if request.method == 'POST':
        raw_data = request.body.decode('utf-8')
        data = json.loads(raw_data)
        user = request.user
        business_id = data['element_id'][:-2]
        if business_id in user.dislikes[:-1].split(','):
            user.dislikes = user.dislikes.replace(business_id + ',', '')
            sel = '#' + data['element_id']
            response = {'success': False, 'element_id': sel}
        else:
            user.dislikes += business_id + ','
            sel = '#' + data['element_id']
            if business_id in user.likes[:-1].split(','):
                user.likes = user.likes.replace(business_id + ',', '')
                response = {'success': True, 'toggled': True, 'element_toggled': sel[:-2] + 'tu', 'element_id': sel}
            else:
                response = {'success': True, 'toggled': False, 'element_id': sel}
        user.save()
        return HttpResponse(json.dumps(response), content_type='application/json')

@login_required
@require_POST
@csrf_exempt
def like(request):
    if request.method == 'POST':
        raw_data = request.body.decode('utf-8')
        data = json.loads(raw_data)
        user = request.user
        business_id = data['element_id'][:-2]
        tastes = ast.literal_eval(user.tastes)
        if business_id in user.likes[:-1].split(','):
            for cat in data['categories'][:-1].split(','):
                if tastes[cat] == 1:
                    del tastes[cat]
                else:
                    tastes[cat] -= 1
            user.likes = user.likes.replace(business_id + ',', '')
            user.tastes = str(tastes)
            sel = '#' + data['element_id']
            response = {'success': False, 'element_id': sel}
        else:
            for cat in data['categories'][:-1].split(','):
                if cat in tastes:
                    tastes[cat] += 1
                else:
                    tastes[cat] = 1
            user.likes += business_id + ','
            user.tastes = str(tastes)
            sel = '#' + data['element_id']
            if business_id in user.dislikes[:-1].split(','):
                user.dislikes = user.dislikes.replace(business_id + ',', '')
                response = {'success': True, 'toggled': True, 'element_toggled': sel[:-2] + 'td', 'element_id': sel}
            else:
                response = {'success': True, 'toggled': False, 'element_id': sel}
        user.save()
        return HttpResponse(json.dumps(response), content_type='application/json')

@login_required
@require_POST
@csrf_exempt
def star(request):
    if request.method == 'POST':
        raw_data = request.body.decode('utf-8')
        data = json.loads(raw_data)
        user = request.user
        business_id = data['element_id'][:-2]
        if business_id in user.stars[:-1].split(','):
            user.stars = user.stars.replace(business_id + ',', '')
            sel = '#' + data['element_id']
            response = {'success': False, 'element_id': sel}
        else:
            user.stars += business_id + ','
            sel = '#' + data['element_id']
            if business_id in user.dislikes[:-1].split(','):
                user.dislikes = user.dislikes.replace(business_id + ',', '')
                response = {'success': True, 'toggled': True, 'element_toggled': sel[:-2] + 'td', 'element_id': sel}
            else:
                response = {'success': True, 'toggled': False, 'element_id': sel}
        user.save()
        return HttpResponse(json.dumps(response), content_type='application/json')

def cast_vote(user, data, vote_opt, type, vote_name, element_id):
    group_vote = GroupVote.objects.get(vote_opt.group_vote_id)
    vote_counts = []
    business_names = []
    if type == 0:
        votes_pri = vote_opt.downvotes
        votes_sec = vote_opt.upvotes
        pri = 'vd'
        sec = 'vu'
    else:
        votes_pri = vote_opt.upvotes
        votes_sec = vote_opt.downvotes
        pri = 'vu'
        sec = 'vd'
    if user.username in votes_pri[:-1].split(','):
        votes_pri = votes_pri.replace(user.username + ',', '')
        sel = '#' + element_id.split('/')[1]
        response = {'success': False, 'element_id': sel}
    else:
        votes_pri += user.username + ','
        sel = '#' + element_id.split('/')[1]
        if user.username in votes_sec[:-1].split(','):
            votes_sec = votes_sec.replace(user.username + ',', '')
            response = {'success': True, 'toggled': True, 'element_toggled': sel + sec, 'element_id': sel + pri}
        else:
            response = {'success': True, 'toggled': False, 'element_id': sel + pri}
    if type == 0:
        vote_opt.downvotes = votes_pri
        vote_opt.upvotes = votes_sec
    else:
        vote_opt.upvotes = votes_pri
        vote_opt.downvotes = votes_sec
    vote_opt.save()
    vote_options = GroupVote.objects.get_options(group_vote.vote_id)
    for vo in vote_options:
        vo_count = VoteOption.objects.vote_count(vo.opt_id)
        business = get_cached_business(vo.business_id)
        business_names.append(business['name'])
        vote_counts.append(vo_count)
    response["chart_labels"] = business_names
    response["chart_data"] = vote_counts
    return response

@login_required
@require_POST
@csrf_exempt
def downvote(request):
    if request.method == 'POST':
        raw_data = request.body.decode('utf-8')
        data = json.loads(raw_data)
        user = request.user
        vote_opt = VoteOption.objects.get(data['element_id'][:-2])
        response = cast_vote(user, data, vote_opt, 0, data['vote_name'], data['element_id'][:-2])
        return HttpResponse(json.dumps(response), content_type='application/json')

@login_required
@require_POST
@csrf_exempt
def upvote(request):
    if request.method == 'POST':
        raw_data = request.body.decode('utf-8')
        data = json.loads(raw_data)
        user = request.user
        vote_opt = VoteOption.objects.get(data['element_id'][:-2])
        response = cast_vote(user, data, vote_opt, 1, data['vote_name'], data['element_id'][:-2])
        return HttpResponse(json.dumps(response), content_type='application/json')

@login_required
@require_POST
@csrf_exempt
def update_chart(request):
    if request.method == 'POST':
        raw_data = request.body.decode('utf-8')
        data = json.loads(raw_data)
        user = request.user
        group_vote = GroupVote.objects.get(data['vote_name'])
        vote_counts = []
        business_names = []
        vote_options = GroupVote.objects.get_options(group_vote.vote_id)
        for vo in vote_options:
            vo_count = VoteOption.objects.vote_count(vo.opt_id)
            business = get_cached_business(vo.business_id)
            business_names.append(business['name'])
            vote_counts.append(vo_count)
        response = {'success': True, 'chart_labels': business_names, 'chart_data': vote_counts}
        return HttpResponse(json.dumps(response), content_type='application/json')

"""
Django page views
"""

@login_required
def create_group(request):
    """Renders the create group page."""
    print("Create Group View")
    time_form = CustomTimeForm()
    if request.method == 'POST':
        print("Create Group: POST Request")
        form = CustomGroupCreationForm(data=request.POST)
        if form.is_valid():
            print("Create Group: Form Valid")
            group, created = Group.objects.get_or_create(name=form.cleaned_data['name'])
            user = request.user
            cgroup = CustomGroup.objects.create(form.cleaned_data['name'], group.id)
            if created:
                user.groups.add(group)
                group.save()
                messages.success(request, 'Your group was successfully created!')
                return redirect('group/?g=' + group.name)
        else:
            print("Create Group: Form Invalid")
            print(form.errors)
            messages.error(request, 'Please correct the error below.')
    else:
        print("Register: GET Request")
        form = CustomGroupCreationForm()
    assert isinstance(request, HttpRequest)
    return render(
        request,
        'app/create_group.html',
        {
            'title': 'Create Group',
            'form': form,
            'time_form': time_form,
        }
    )

@login_required
def create_group_vote(request):
    """Renders the create vote page."""
    print("Create Vote View")
    time_form = CustomTimeForm()
    if request.method == 'POST':
        print("Create Vote: POST Request")
        form = CustomVoteCreationForm(data=request.POST)
        if form.is_valid():
            print("Create Vote: Form Valid")
            groupname = request.GET.get('g', None)
            group = Group.objects.get(name=groupname)
            cgroup = CustomGroup.objects.get(group.id)
            vote_id = str(group.id) + datetime.datetime.now().strftime("--%m-%d-%y--") + form.cleaned_data["name"]
            name = datetime.datetime.now().strftime("(%m-%d-%y) ") + form.cleaned_data["name"]
            group_vote, created = GroupVote.objects.get_or_create(vote_id, name, cgroup)
            if created:
                messages.success(request, 'Your vote was successfully created!')
                return redirect('group/vote/?g=' + group.name + '&v=' + group_vote.vote_id)
        else:
            print("Create Vote: Form Invalid")
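# [Illustrative sketch, not part of views.py] The like/dislike/star handlers
# above all store memberships as comma-terminated strings such as 'abc,xyz,'
# and toggle them with the same slice/split/replace idiom. In isolation the
# toggle looks like this (the helper name is mine):
def toggle_csv_member(csv_field, member):
    """Return (new_field, now_present) for a trailing-comma CSV string."""
    if member in csv_field[:-1].split(','):
        return csv_field.replace(member + ',', ''), False
    return csv_field + member + ',', True

# toggle_csv_member('abc,xyz,', 'abc') -> ('xyz,', False)
# toggle_csv_member('xyz,', 'abc')     -> ('xyz,abc,', True)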
#! /usr/bin/env python3

# Copyright(c) 2017-2018 Intel Corporation.
# License: MIT See LICENSE file in root directory.

GREEN = '\033[1;32m'
RED = '\033[1;31m'
NOCOLOR = '\033[0m'
YELLOW = '\033[1;33m'

try:
    from openvino.inference_engine import IENetwork, ExecutableNetwork, IECore
    import openvino.inference_engine.ie_api
except:
    print(RED + '\nPlease make sure your OpenVINO environment variables are set by sourcing the' +
          YELLOW + ' setupvars.sh ' + RED +
          'script found in <your OpenVINO install location>/bin/ folder.\n' + NOCOLOR)
    exit(1)

import cv2
import numpy
import time
import sys
import threading
import os
from sys import argv
import datetime
import queue
from queue import *

INFERENCE_DEV = "MYRIAD"

sep = os.path.sep

DEFAULT_IMAGE_DIR = "." + sep + "images"
DEFAULT_MODEL_XML = "." + sep + "googlenet-v1.xml"
DEFAULT_MODEL_BIN = "." + sep + "googlenet-v1.bin"

cv_window_name = "benchmark_ncs"

# how long to wait for queues
QUEUE_WAIT_SECONDS = 10

# set some global parameters to initial values that may get overriden with arguments to the application.
inference_device = INFERENCE_DEV
image_dir = DEFAULT_IMAGE_DIR
number_of_devices = 1
number_of_inferences = 1000
run_async = True
time_threads = True
time_main = False

# For each device one executable network will be created, and this many threads
# will be created to run inferences on it in parallel.
threads_per_dev = 3

# Each thread will start this many async inferences at a time. It should be at
# least the number of NCEs on board (the Myriad X has 2); slightly better
# results are often seen with more. Myriad X does well with 4.
simultaneous_infer_per_thread = 6

report_interval = int(number_of_inferences / 10)  # report out the current FPS every this many inferences

model_xml_fullpath = DEFAULT_MODEL_XML
model_bin_fullpath = DEFAULT_MODEL_BIN

net_config = {'HW_STAGES_OPTIMIZATION': 'YES', 'COMPUTE_LAYOUT': 'VPU_NCHW', 'RESHAPE_OPTIMIZATION': 'NO'}

INFER_RES_QUEUE_SIZE = 6


def handle_args():
    """Reads the commandline args and adjusts initial values of globals to match.

    :return: False if there was an error with the args, or True if args processed ok.
    """
    global number_of_devices, number_of_inferences, model_xml_fullpath, model_bin_fullpath, run_async, \
           time_threads, time_main, num_ncs_devs, threads_per_dev, simultaneous_infer_per_thread, report_interval, \
           image_dir, inference_device

    have_model_xml = False
    have_model_bin = False

    for an_arg in argv:
        lower_arg = str(an_arg).lower()
        if (an_arg == argv[0]):
            continue
        elif (lower_arg == 'help'):
            return False
        elif (lower_arg.startswith('num_devices=') or lower_arg.startswith("nd=")):
            try:
                arg, val = str(an_arg).split('=', 1)
                num_dev_str = val
                number_of_devices = int(num_dev_str)
                if (number_of_devices < 1):
                    print('Error - num_devices argument invalid. It must be > 0')
                    return False
                print('setting num_devices: ' + str(number_of_devices))
            except:
                print('Error - num_devices argument invalid. It must be between 1 and number of devices in system')
                return False
        elif (lower_arg.startswith('device=') or lower_arg.startswith("dev=")):
            try:
                arg, val = str(an_arg).split('=', 1)
                dev = val
                inference_device = str(dev)
                print("inference device:", inference_device)
                if (inference_device != "MYRIAD" and inference_device != "CPU"):
                    print('Error - Device must be CPU or MYRIAD')
                    return False
                print('setting device: ' + str(inference_device))
            except:
                print('Error - Device must be CPU or MYRIAD')
                return False
        elif (lower_arg.startswith('report_interval=') or lower_arg.startswith("ri=")):
            try:
                arg, val = str(an_arg).split('=', 1)
                val_str = val
                report_interval = int(val_str)
                if (report_interval < 0):
                    print('Error - report_interval must be greater than or equal to 0')
                    return False
                print('setting report_interval: ' + str(report_interval))
            except:
                print('Error - report_interval argument invalid. It must be greater than or equal to zero')
                return False
        elif (lower_arg.startswith('num_inferences=') or lower_arg.startswith('ni=')):
            try:
                arg, val = str(an_arg).split('=', 1)
                num_infer_str = val
                number_of_inferences = int(num_infer_str)
                if (number_of_inferences < 1):
                    print('Error - num_inferences argument invalid. It must be > 0')
                    return False
                print('setting num_inferences: ' + str(number_of_inferences))
            except:
                print('Error - num_inferences argument invalid. It must be a positive integer')
                return False
        elif (lower_arg.startswith('num_threads_per_device=') or lower_arg.startswith('ntpd=')):
            try:
                arg, val = str(an_arg).split('=', 1)
                val_str = val
                threads_per_dev = int(val_str)
                if (threads_per_dev < 1):
                    print('Error - threads_per_dev argument invalid. It must be > 0')
                    return False
                print('setting num_threads_per_device: ' + str(threads_per_dev))
            except:
                print('Error - num_threads_per_device argument invalid, it must be a positive integer.')
                return False
        elif (lower_arg.startswith('num_simultaneous_inferences_per_thread=') or lower_arg.startswith('nsipt=')):
            try:
                arg, val = str(an_arg).split('=', 1)
                val_str = val
                simultaneous_infer_per_thread = int(val_str)
                if (simultaneous_infer_per_thread < 1):
                    print('Error - simultaneous_infer_per_thread argument invalid. It must be > 0')
                    return False
                print('setting num_simultaneous_inferences_per_thread: ' + str(simultaneous_infer_per_thread))
            except:
                print('Error - num_simultaneous_inferences_per_thread argument invalid, it must be a positive integer.')
                return False
        elif (lower_arg.startswith('model_xml=') or lower_arg.startswith('mx=')):
            try:
                arg, val = str(an_arg).split('=', 1)
                model_xml_fullpath = val
                if not (os.path.isfile(model_xml_fullpath)):
                    print("Error - Model XML file passed does not exist or isn't a file")
                    return False
                print('setting model_xml: ' + str(model_xml_fullpath))
                have_model_xml = True
            except:
                print('Error with model_xml argument. It must be a valid model file generated by the OpenVINO Model Optimizer')
                return False
        elif (lower_arg.startswith('model_bin=') or lower_arg.startswith('mb=')):
            try:
                arg, val = str(an_arg).split('=', 1)
                model_bin_fullpath = val
                if not (os.path.isfile(model_bin_fullpath)):
                    print("Error - Model bin file passed does not exist or isn't a file")
                    return False
                print('setting model_bin: ' + str(model_bin_fullpath))
                have_model_bin = True
            except:
                print('Error with model_bin argument. It must be a valid model file generated by the OpenVINO Model Optimizer')
                return False
        elif (lower_arg.startswith('run_async=') or lower_arg.startswith('ra=')):
            try:
                arg, val = str(an_arg).split('=', 1)
                run_async = (val.lower() == 'true')
                print('setting run_async: ' + str(run_async))
            except:
                print("Error with run_async argument. It must be 'True' or 'False' ")
                return False
        elif (lower_arg.startswith('image_dir=') or lower_arg.startswith('id=')):
            try:
                arg, val = str(an_arg).split('=', 1)
                image_dir = val
                if not (os.path.isdir(image_dir)):
                    print("Error - Image directory passed does not exist or isn't a directory:")
                    print("        passed value: " + image_dir)
                    return False
                print('setting image_dir: ' + str(image_dir))
            except:
                print('Error with image_dir argument. It must be a valid directory path.')
                return False
        elif (lower_arg.startswith('time_threads=') or lower_arg.startswith('tt=')):
            try:
                arg, val = str(an_arg).split('=', 1)
                time_threads = (val.lower() == 'true')
                print('setting time_threads: ' + str(time_threads))
            except:
                print("Error with time_threads argument. It must be 'True' or 'False' ")
                return False
        elif (lower_arg.startswith('time_main=') or lower_arg.startswith('tm=')):
            try:
                arg, val = str(an_arg).split('=', 1)
                time_main = (val.lower() == 'true')
                print('setting time_main: ' + str(time_main))
            except:
                print("Error with time_main argument. It must be 'True' or 'False' ")
                return False

    if (time_main == False and time_threads == False):
        print("Error - Both time_threads and time_main args were set to false. One of these must be true. ")
        return False

    if ((have_model_bin and not have_model_xml) or (have_model_xml and not have_model_bin)):
        print("Error - only one of model_bin and model_xml were specified. You must specify both or neither.")
        return False

    if (run_async == False) and (simultaneous_infer_per_thread != 1):
        print("Warning - If run_async is False then num_simultaneous_inferences_per_thread must be 1.")
        print("Setting num_simultaneous_inferences_per_thread to 1")
        simultaneous_infer_per_thread = 1

    return True


def print_arg_vals():
    print("")
    print("--------------------------------------------------------")
    print("Current date and time: " + str(datetime.datetime.now()))
    print("")
    print("program arguments:")
    print("------------------")
    print('device: ' + inference_device)
    print('num_devices: ' + str(number_of_devices))
    print('num_inferences: ' + str(number_of_inferences))
    print('num_threads_per_device: ' + str(threads_per_dev))
    print('num_simultaneous_inferences_per_thread: ' + str(simultaneous_infer_per_thread))
    print('report_interval: ' + str(report_interval))
    print('model_xml: ' + str(model_xml_fullpath))
    print('model_bin: ' + str(model_bin_fullpath))
    print('image_dir: ' + str(image_dir))
    print('run_async: ' + str(run_async))
    print('time_threads: ' + str(time_threads))
    print('time_main: ' + str(time_main))
    print("--------------------------------------------------------")


def print_usage():
    print('\nusage: ')
    print('python3 benchmark_ncs [help][nd=<number of devices to use>] [ni=<number of inferences per device>]')
    print('                      [report_interval=<num inferences between reporting>] [ntpd=<number of threads to use per device>]')
    print('                      [nsipt=<simultaneous inference on each thread>] [mx=<path to model xml file> mb=<path to model bin file>]')
    print('')
    print('options:')
    print("  num_devices or nd - The number of devices to use for inferencing ")
    print("                      The value must be between 1 and the total number of devices in the system.")
    print("                      Default is to use 1 device. ")
    print("  num_inferences or ni - The number of inferences to run on each device. ")
    print("                         Default is to run 1000 inferences. ")
    print("  report_interval or ri - Report the current FPS every time this many inferences are complete. To suppress reporting set to 0.")
    print("                          Default is to report FPS every num_inferences/10 inferences. ")
    print("  num_threads_per_device or ntpd - The number of threads to create that will run inferences in parallel for each device. ")
    print("                                   Default is to create 3 threads per device. ")
    print("  num_simultaneous_inferences_per_thread or nsipt - The number of inferences that each thread will create asynchronously. ")
    print("                                                    This should be at least equal to the number of NCEs on board or more.")
    print("                                                    Default is 6 simultaneous inferences per thread.")
    print("  model_xml or mx - Full path to the model xml file generated
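# [Illustrative invocation, assuming the argument parsing above; the script
#  file name is assumed] The benchmark takes key=value arguments, not flags:
#
#   python3 benchmark_ncs.py dev=MYRIAD nd=1 ni=1000 ntpd=3 nsipt=6 \
#       mx=./googlenet-v1.xml mb=./googlenet-v1.bin ra=true ri=100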
<reponame>Kolkir/superpoint<filename>python/src/homographies.py # The code is based on https://github.com/rpautrat/SuperPoint/ that is licensed as: # MIT License # # Copyright (c) 2018 <NAME> & <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from math import pi import numpy as np import torch from scipy.stats import truncnorm from torchvision.transforms import functional_tensor import cv2 class HomographyConfig(object): def __init__(self): self.num = 15 self.perspective = True self.scaling = True self.rotation = True self.translation = True self.n_scales = 5 self.n_angles = 25 self.scaling_amplitude = 0.1 self.perspective_amplitude_x = 0.1 self.perspective_amplitude_y = 0.1 self.patch_ratio = 0.5 self.max_angle = pi / 2 self.allow_artifacts = False self.translation_overflow = 0. self.valid_border_margin = 8 self.aggregation = 'sum' def init_for_preprocess(self): self.translation = True self.rotation = True self.scaling = True self.perspective = True self.scaling_amplitude = 0.2 self.perspective_amplitude_x = 0.2 self.perspective_amplitude_y = 0.2 self.allow_artifacts = True self.patch_ratio = 0.85 def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=torch.float32): a = mean - 2 * stddev b = mean + 2 * stddev return torch.tensor(truncnorm(a, b).rvs(shape), dtype=dtype) def random_uniform(shape, low, high): if low > high: low, high = high, low if low == high: high = low + 0.00001 return torch.distributions.uniform.Uniform(low, high).sample(shape) def sample_homography( shape, perspective=True, scaling=True, rotation=True, translation=True, n_scales=5, n_angles=25, scaling_amplitude=0.1, perspective_amplitude_x=0.1, perspective_amplitude_y=0.1, patch_ratio=0.5, max_angle=pi / 2, allow_artifacts=False, translation_overflow=0.): """Sample a random valid homography. Computes the homography transformation between a random patch in the original image and a warped projection with the same image size. As in `tf.contrib.image.transform`, it maps the output point (warped patch) to a transformed input point (original patch). The original patch, which is initialized with a simple half-size centered crop, is iteratively projected, scaled, rotated and translated. Arguments: shape: A rank-2 `Tensor` specifying the height and width of the original image. perspective: A boolean that enables the perspective and affine transformations. scaling: A boolean that enables the random scaling of the patch. rotation: A boolean that enables the random rotation of the patch. 
        translation: A boolean that enables the random translation of the patch.
        n_scales: The number of tentative scales that are sampled when scaling.
        n_angles: The number of tentative angles that are sampled when rotating.
        scaling_amplitude: Controls the amount of scale.
        perspective_amplitude_x: Controls the perspective effect in x direction.
        perspective_amplitude_y: Controls the perspective effect in y direction.
        patch_ratio: Controls the size of the patches used to create the homography.
        max_angle: Maximum angle used in rotations.
        allow_artifacts: A boolean that enables artifacts when applying the homography.
        translation_overflow: Amount of border artifacts caused by translation.

    Returns:
        A `Tensor` of shape `[1, 8]` corresponding to the flattened homography transform.
    """
    # Corners of the output image
    margin = (1 - patch_ratio) / 2
    pts1 = margin + torch.tensor([[0, 0], [0, patch_ratio],
                                  [patch_ratio, patch_ratio], [patch_ratio, 0]],
                                 dtype=torch.float32)
    # Corners of the input patch
    pts2 = pts1

    # Random perspective and affine perturbations
    if perspective:
        if not allow_artifacts:
            perspective_amplitude_x = min(perspective_amplitude_x, margin)
            perspective_amplitude_y = min(perspective_amplitude_y, margin)
        perspective_displacement = truncated_normal([1], 0., perspective_amplitude_y / 2)
        h_displacement_left = truncated_normal([1], 0., perspective_amplitude_x / 2)
        h_displacement_right = truncated_normal([1], 0., perspective_amplitude_x / 2)
        pts2 += torch.stack([torch.cat([h_displacement_left, perspective_displacement], 0),
                             torch.cat([h_displacement_left, -perspective_displacement], 0),
                             torch.cat([h_displacement_right, perspective_displacement], 0),
                             torch.cat([h_displacement_right, -perspective_displacement], 0)])

    # Random scaling
    # sample several scales, check collision with borders, randomly pick a valid one
    if scaling:
        scales = torch.cat(
            [torch.tensor([1.]), truncated_normal([n_scales], 1, scaling_amplitude / 2)], 0)
        center = torch.mean(pts2, dim=0, keepdim=True)
        scaled = torch.unsqueeze(pts2 - center, dim=0) * torch.unsqueeze(
            torch.unsqueeze(scales, dim=1), dim=1) + center
        if allow_artifacts:
            valid = torch.arange(1, n_scales + 1)  # all scales are valid except scale=1
        else:
            # a scale is valid only if *all* corners stay inside the image
            valid = torch.nonzero(
                ((scaled >= 0.) & (scaled < 1.)).all(dim=2).all(dim=1))[:, 0]
        idx = valid[torch.randint(high=valid.shape[0], size=())]
        pts2 = scaled[idx]

    # Random translation
    if translation:
        t_min, _ = torch.min(pts2, dim=0)
        t_max, _ = torch.min(1. - pts2, dim=0)
        if allow_artifacts:
            t_min += translation_overflow
            t_max += translation_overflow
        pts2 += torch.unsqueeze(torch.stack([random_uniform((), -t_min[0], t_max[0]),
                                             random_uniform((), -t_min[1], t_max[1])]), dim=0)

    # Random rotation
    # sample several rotations, check collision with borders, randomly pick a valid one
    if rotation:
        angles = torch.linspace(-max_angle, max_angle, n_angles)
        angles = torch.cat([torch.tensor([0.]), angles], dim=0)  # in case no rotation is valid
        center = torch.mean(pts2, dim=0, keepdim=True)
        rot_mat = torch.reshape(torch.stack([torch.cos(angles), -torch.sin(angles),
                                             torch.sin(angles), torch.cos(angles)], dim=1),
                                [-1, 2, 2])
        rotated = torch.matmul(
            torch.tile(torch.unsqueeze(pts2 - center, dim=0), [n_angles + 1, 1, 1]),
            rot_mat) + center
        if allow_artifacts:
            valid = torch.arange(1, n_angles + 1)  # all angles are valid, except angle=0
        else:
            # an angle is valid only if *all* corners stay inside the image
            valid = torch.nonzero(
                ((rotated >= 0.) & (rotated < 1.)).all(dim=2).all(dim=1))[:, 0]
        idx = valid[torch.randint(high=valid.shape[0], size=())]
        pts2 = rotated[idx]

    # Rescale to actual size
    shape = torch.tensor(shape[::-1], dtype=torch.float32)  # different convention [y, x]
    pts1 *= torch.unsqueeze(shape, dim=0)
    pts2 *= torch.unsqueeze(shape, dim=0)

    def ax(p, q):
        return torch.tensor([p[0], p[1], 1, 0, 0, 0, -p[0] * q[0], -p[1] * q[0]])

    def ay(p, q):
        return torch.tensor([0, 0, 0, p[0], p[1], 1, -p[0] * q[1], -p[1] * q[1]])

    a_mat = torch.stack([f(pts1[i], pts2[i]) for i in range(4) for f in (ax, ay)], dim=0)
    p_mat = torch.stack([pts2[i][j] for i in range(4) for j in range(2)]).t()
    p_mat.unsqueeze_(dim=1)
    x = torch.linalg.solve(a_mat, p_mat)
    homography = x.t()
    return homography.squeeze(dim=0)


def invert_homography(h):
    """
    Computes the inverse transformation for a flattened homography transformation.
    """
    return mat2flat(torch.linalg.inv(flat2mat(h)))


def flat2mat(h):
    """
    Converts a flattened homography transformation with shape `[1, 8]` to its
    corresponding homography matrix with shape `[1, 3, 3]`.
    """
    return torch.reshape(torch.cat([h, torch.ones([h.shape[0], 1], device=h.device)], dim=1),
                         [-1, 3, 3])


def mat2flat(h):
    """
    Converts a homography matrix with shape `[1, 3, 3]` to its corresponding
    flattened homography transformation with shape `[1, 8]`.
    """
    h = torch.reshape(h, [-1, 9])
    return (h / h[:, 8:9])[:, :8]


def homography_transform(t, h_coeffs, interpolation='bilinear'):
    return functional_tensor.perspective(t, h_coeffs.numpy().flatten(),
                                         interpolation=interpolation)


def homographic_augmentation(image, points, config):
    # Sample random homography transform; expand the config into keyword
    # arguments, since sample_homography does not accept a config object.
    img_h = image.shape[2]
    img_w = image.shape[3]
    image_shape = [img_h, img_w]
    homography = sample_homography(image_shape,
                                   perspective=config.perspective,
                                   scaling=config.scaling,
                                   rotation=config.rotation,
                                   translation=config.translation,
                                   n_scales=config.n_scales,
                                   n_angles=config.n_angles,
                                   scaling_amplitude=config.scaling_amplitude,
                                   perspective_amplitude_x=config.perspective_amplitude_x,
                                   perspective_amplitude_y=config.perspective_amplitude_y,
                                   patch_ratio=config.patch_ratio,
                                   max_angle=config.max_angle,
                                   allow_artifacts=config.allow_artifacts,
                                   translation_overflow=config.translation_overflow)
    # Apply transformation
    warped_image = homography_transform(image, homography)
    valid_mask = compute_valid_mask(image_shape, homography, config.valid_border_margin)
    warped_points = warp_points(points, homography)
    warped_points = filter_points(warped_points, image_shape)
    return warped_image, warped_points, valid_mask, homography


def erode(image, erosion_radius):
    orig_device = image.device
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (erosion_radius * 2,) * 2)
    image = image.cpu().numpy().transpose([1, 2, 0])  # adapt channels for OpenCV format
    image = cv2.erode(image, kernel, iterations=1,
                      borderType=cv2.BORDER_CONSTANT, borderValue=0)
    image = np.expand_dims(image, axis=0)  # restore torch image format
    image = torch.from_numpy(image)
    image = image.to(device=orig_device)
    return image


def homography_adaptation(image, net, config):
    """ Performs homography adaptation.

    Inference using multiple random warped patches of the same input image for
    robust predictions.

    Arguments:
        image: A `Tensor` with shape `[B, C, H, W]`.
        net: A function that takes an image as input, performs inference, and
            outputs the prediction dictionary.
        config: A HomographyConfig whose fields control the adaptation:
            num: the number of sampled homographies.
            valid_border_margin: size of the border to ignore detections.
            aggregation: how to aggregate probabilities, 'max' or 'sum'.

    Returns:
        A dictionary which contains the aggregated detection probabilities.
""" all_probs, _, _ = net(image) all_counts = torch.ones_like(all_probs) all_probs.unsqueeze_(dim=-1) all_counts.unsqueeze_(dim=-1) shape = image.shape[2:4] def step(probs, counts): with torch.no_grad(): H = sample_homography(shape, perspective=config.perspective, scaling=config.scaling, rotation=config.rotation, translation=config.translation, n_scales=config.n_scales, n_angles=config.n_angles, scaling_amplitude=config.scaling_amplitude, perspective_amplitude_x=config.perspective_amplitude_x, perspective_amplitude_y=config.perspective_amplitude_y, patch_ratio=config.patch_ratio, max_angle=config.max_angle, allow_artifacts=config.allow_artifacts, translation_overflow=config.translation_overflow) H.unsqueeze_(dim=0) H_inv = invert_homography(H) warped = homography_transform(image, H) count = homography_transform(torch.ones(shape, device=image.device).unsqueeze(0), H_inv, interpolation='nearest') mask = homography_transform(torch.ones(shape, device=image.device).unsqueeze(0), H, interpolation='nearest') # Ignore the detections too close to the border to avoid artifacts if config.valid_border_margin != 0: count = erode(count, config.valid_border_margin) mask = erode(mask, config.valid_border_margin) # Predict detection
<gh_stars>0 from numpy import array, ceil from models import LoadSampler def case75(flex_level = 'MEDIUM'): case = {"version": "ANM"} ## system MVA base case["baseMVA"] = 1.0 ## Bus data case["bus"] = array([ [1000, 3, 0.0, 0.0, 0, 0, 1, 1, 0, 33, 1, 1.1, 0.9], [1100, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1101, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1102, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1103, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1104, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1105, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1106, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1107, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1108, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1109, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1110, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1111, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1112, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1113, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1114, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1115, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1116, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1117, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1118, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1119, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1120, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1121, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1122, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1123, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1124, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1125, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1126, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1127, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1128, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1129, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1130, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1131, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1132, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1133, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1134, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1135, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1136, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1137, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1138, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1139, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1140, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1141, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1142, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1143, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1144, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1145, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1146, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1147, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1148, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1149, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1150, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1151, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1152, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1153, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1154, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1155, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1156, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1157, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1158, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1159, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 
0.95], [1160, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1161, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1162, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1163, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1164, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1165, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1166, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1167, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1168, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1169, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1170, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1171, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1172, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1173, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1174, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95], [1175, 1, 0.0, 0.0, 0, 0, 1, 1, 0, 11, 1, 1.05, 0.95] ]) ## Gen data case["gen"] = array([ [1000, 0.0, 0.0, 100.0, -100.0, 1.02, 100.0, 1, 100.0, -100.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1101, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1103, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1104, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1106, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1107, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1109, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1110, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1111, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1113, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1114, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1115, -1.0, -0.2031, 0.0, 0.0, 0.0, 100.0, 1, 0.0, 0.0, 0.0, 0.0, 0.0,
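
# --- Illustrative sketch (not part of the original case file) ---
# The "bus" rows above appear to follow the MATPOWER/PYPOWER column convention
# (BUS_I, BUS_TYPE, PD, QD, GS, BS, AREA, VM, VA, BASE_KV, ZONE, VMAX, VMIN);
# that convention is an assumption, not stated in the file. A minimal reader
# under that assumption:
def bus_summary(case):
    # zero-based column indices per the (assumed) MATPOWER bus format
    BUS_I, BUS_TYPE, BASE_KV, VMAX, VMIN = 0, 1, 9, 11, 12
    for row in case["bus"]:
        kind = {1: 'PQ', 2: 'PV', 3: 'slack'}.get(int(row[BUS_TYPE]), '?')
        print("bus %d: %s, %g kV, V in [%g, %g] p.u."
              % (int(row[BUS_I]), kind, row[BASE_KV], row[VMIN], row[VMAX]))

# e.g. bus_summary(case75()): bus 1000 reads as the 33 kV slack bus, and the
# 11 kV feeder buses 1100-1175 as PQ buses with +/-5% voltage limits.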
'{}' detected".format( self.argname ) else: msg = "fixture '{}' not found".format(self.argname) msg += "\n available fixtures: {}".format(", ".join(sorted(available))) msg += "\n use 'pytest --fixtures [testpath]' for help on them." return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) class FixtureLookupErrorRepr(TerminalRepr): def __init__(self, filename, firstlineno, tblines, errorstring, argname): self.tblines = tblines self.errorstring = errorstring self.filename = filename self.firstlineno = firstlineno self.argname = argname def toterminal(self, tw): # tw.line("FixtureLookupError: %s" %(self.argname), red=True) for tbline in self.tblines: tw.line(tbline.rstrip()) lines = self.errorstring.split("\n") if lines: tw.line( "{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()), red=True, ) for line in lines[1:]: tw.line( "{} {}".format(FormattedExcinfo.flow_marker, line.strip()), red=True, ) tw.line() tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) def fail_fixturefunc(fixturefunc, msg): fs, lineno = getfslineno(fixturefunc) location = "%s:%s" % (fs, lineno + 1) source = _pytest._code.Source(fixturefunc) fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) def call_fixture_func(fixturefunc, request, kwargs): yieldctx = is_generator(fixturefunc) if yieldctx: it = fixturefunc(**kwargs) res = next(it) finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, it) request.addfinalizer(finalizer) else: res = fixturefunc(**kwargs) return res def _teardown_yield_fixture(fixturefunc, it): """Executes the teardown of a fixture function by advancing the iterator after the yield and ensure the iteration ends (if not it means there is more than one yield in the function)""" try: next(it) except StopIteration: pass else: fail_fixturefunc( fixturefunc, "yield_fixture function has more than one 'yield'" ) class FixtureDef(object): """ A container for a factory definition. 
""" def __init__( self, fixturemanager, baseid, argname, func, scope, params, unittest=False, ids=None, ): self._fixturemanager = fixturemanager self.baseid = baseid or "" self.has_location = baseid is not None self.func = func self.argname = argname self.scope = scope self.scopenum = scope2index( scope or "function", descr="Fixture '{}'".format(func.__name__), where=baseid, ) self.params = params self.argnames = getfuncargnames(func, is_method=unittest) self.unittest = unittest self.ids = ids self._finalizers = [] def addfinalizer(self, finalizer): self._finalizers.append(finalizer) def finish(self, request): exceptions = [] try: while self._finalizers: try: func = self._finalizers.pop() func() except: # noqa exceptions.append(sys.exc_info()) if exceptions: e = exceptions[0] del exceptions # ensure we don't keep all frames alive because of the traceback six.reraise(*e) finally: hook = self._fixturemanager.session.gethookproxy(request.node.fspath) hook.pytest_fixture_post_finalizer(fixturedef=self, request=request) # even if finalization fails, we invalidate # the cached fixture value and remove # all finalizers because they may be bound methods which will # keep instances alive if hasattr(self, "cached_result"): del self.cached_result self._finalizers = [] def execute(self, request): # get required arguments and register our own finish() # with their finalization for argname in self.argnames: fixturedef = request._get_active_fixturedef(argname) if argname != "request": fixturedef.addfinalizer(functools.partial(self.finish, request=request)) my_cache_key = request.param_index cached_result = getattr(self, "cached_result", None) if cached_result is not None: result, cache_key, err = cached_result if my_cache_key == cache_key: if err is not None: six.reraise(*err) else: return result # we have a previous but differently parametrized fixture instance # so we need to tear it down before creating a new one self.finish(request) assert not hasattr(self, "cached_result") hook = self._fixturemanager.session.gethookproxy(request.node.fspath) return hook.pytest_fixture_setup(fixturedef=self, request=request) def __repr__(self): return "<FixtureDef argname=%r scope=%r baseid=%r>" % ( self.argname, self.scope, self.baseid, ) def resolve_fixture_function(fixturedef, request): """Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific instances and bound methods. """ fixturefunc = fixturedef.func if fixturedef.unittest: if request.instance is not None: # bind the unbound method to the TestCase instance fixturefunc = fixturedef.func.__get__(request.instance) else: # the fixture function needs to be bound to the actual # request.instance so that code working with "fixturedef" behaves # as expected. if request.instance is not None: fixturefunc = getimfunc(fixturedef.func) if fixturefunc != fixturedef.func: fixturefunc = fixturefunc.__get__(request.instance) return fixturefunc def pytest_fixture_setup(fixturedef, request): """ Execution of fixture setup. 
""" kwargs = {} for argname in fixturedef.argnames: fixdef = request._get_active_fixturedef(argname) result, arg_cache_key, exc = fixdef.cached_result request._check_scope(argname, request.scope, fixdef.scope) kwargs[argname] = result fixturefunc = resolve_fixture_function(fixturedef, request) my_cache_key = request.param_index try: result = call_fixture_func(fixturefunc, request, kwargs) except TEST_OUTCOME: fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) raise fixturedef.cached_result = (result, my_cache_key, None) return result def _ensure_immutable_ids(ids): if ids is None: return if callable(ids): return ids return tuple(ids) def wrap_function_to_error_out_if_called_directly(function, fixture_marker): """Wrap the given fixture function so we can raise an error about it being called directly, instead of used as an argument in a test function. """ message = FIXTURE_FUNCTION_CALL.format( name=fixture_marker.name or function.__name__ ) @six.wraps(function) def result(*args, **kwargs): fail(message, pytrace=False) # keep reference to the original function in our own custom attribute so we don't unwrap # further than this point and lose useful wrappings like @mock.patch (#3774) result.__pytest_wrapped__ = _PytestWrapper(function) return result @attr.s(frozen=True) class FixtureFunctionMarker(object): scope = attr.ib() params = attr.ib(converter=attr.converters.optional(tuple)) autouse = attr.ib(default=False) ids = attr.ib(default=None, converter=_ensure_immutable_ids) name = attr.ib(default=None) def __call__(self, function): if isclass(function): raise ValueError("class fixtures not supported (maybe in the future)") if getattr(function, "_pytestfixturefunction", False): raise ValueError( "fixture is being applied more than once to the same function" ) function = wrap_function_to_error_out_if_called_directly(function, self) name = self.name or function.__name__ if name == "request": warnings.warn(FIXTURE_NAMED_REQUEST) function._pytestfixturefunction = self return function def fixture(scope="function", params=None, autouse=False, ids=None, name=None): """Decorator to mark a fixture factory function. This decorator can be used, with or without parameters, to define a fixture function. The name of the fixture function can later be referenced to cause its invocation ahead of running tests: test modules or classes can use the ``pytest.mark.usefixtures(fixturename)`` marker. Test functions can directly use fixture names as input arguments in which case the fixture instance returned from the fixture function will be injected. Fixtures can provide their values to test functions using ``return`` or ``yield`` statements. When using ``yield`` the code block after the ``yield`` statement is executed as teardown code regardless of the test outcome, and must yield exactly once. :arg scope: the scope for which this fixture is shared, one of ``"function"`` (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``. ``"package"`` is considered **experimental** at this time. :arg params: an optional list of parameters which will cause multiple invocations of the fixture function and all of the tests using it. :arg autouse: if True, the fixture func is activated for all tests that can see it. If False (the default) then an explicit reference is needed to activate the fixture. :arg ids: list of string ids each corresponding to the params so that they are part of the test id. If no ids are provided they will be generated automatically from the params. 
:arg name: the name of the fixture. This defaults to the name of the decorated function. If a fixture is used in the same module in which it is defined, the function name of the fixture will be shadowed by the function arg that requests the fixture; one way to resolve this is to name the decorated function ``fixture_<fixturename>`` and then use ``@pytest.fixture(name='<fixturename>')``. """ if callable(scope) and params is None and autouse is False: # direct decoration return FixtureFunctionMarker("function", params, autouse, name=name)(scope) if params is not None and not isinstance(params, (list, tuple)): params = list(params) return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None): """ (return a) decorator to mark a yield-fixture factory function. .. deprecated:: 3.0 Use :py:func:`pytest.fixture` directly instead. """ return fixture(scope=scope, params=params, autouse=autouse, ids=ids, name=name) defaultfuncargprefixmarker = fixture() @fixture(scope="session") def pytestconfig(request): """Session-scoped fixture that returns the :class:`_pytest.config.Config` object. Example:: def test_foo(pytestconfig): if pytestconfig.getoption("verbose"): ... """ return request.config class FixtureManager(object): """ pytest fixtures definitions and information is stored and managed from this class. During collection fm.parsefactories() is called multiple times to parse fixture function definitions into FixtureDef objects and internal data structures. During collection of test functions, metafunc-mechanics instantiate a FuncFixtureInfo object which is cached per node/func-name. This FuncFixtureInfo object is later retrieved by Function nodes which themselves offer a fixturenames attribute. The FuncFixtureInfo object holds information about fixtures and FixtureDefs relevant for a particular function. An initial list of fixtures is assembled like this: - ini-defined usefixtures - autouse-marked fixtures along the collection chain up from the function - usefixtures markers at module/class/function level - test function funcargs Subsequently the funcfixtureinfo.fixturenames attribute is computed as the closure of the fixtures needed to setup the initial fixtures, i. e. fixtures needed by fixture functions themselves are appended to the fixturenames list.
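
# --- Illustrative sketch (not part of the pytest source above) ---
# A minimal example of the fixture machinery described above, as seen from user
# code: call_fixture_func treats a generator fixture as setup plus teardown, and
# FixtureDef.finish later runs the registered finalizer. The names `resource`
# and `test_uses_resource` are hypothetical.
import pytest

@pytest.fixture(scope="function")
def resource():
    handle = {"open": True}   # setup runs before the test body
    yield handle              # the yielded value is injected into the test
    handle["open"] = False    # teardown runs via the registered finalizer

def test_uses_resource(resource):
    assert resource["open"]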
) os.unlink( dir_w+"/foo5" ) if os.path.exists( myldir ): shutil.rmtree( myldir ) if os.path.exists( mysdir ): shutil.rmtree( mysdir ) def test_mcoll_from_devtest(self): # build expected variables with similar devtest names progname = __file__ myssize = str(os.stat(progname).st_size) username = s.adminsession.getUserName() irodszone = s.adminsession.getZoneName() testuser1 = s.sessions[1].getUserName() irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId irodsdefresource = s.adminsession.getDefResource() dir_w = "." sfile2 = dir_w+"/sfile2" commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 ) mysdir = "/tmp/irodssdir" myldir = dir_w+"/ldir" if os.path.exists( myldir ): shutil.rmtree( myldir ) # make a directory containing 20 small files if not os.path.isdir(mysdir): os.mkdir(mysdir) for i in range(20): mysfile = mysdir+"/sfile"+str(i) shutil.copyfile( progname, mysfile ) assertiCmd(s.adminsession,"imkdir icmdtest") # we put foo1 in $irodsdefresource and foo2 in testresource assertiCmd(s.adminsession,"iput -K --wlock "+progname+" "+irodshome+"/icmdtest/foo1" ) assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest/foo1 "+irodshome+"/icmdtest/foo2" ) # prepare icmdtesta assertiCmd(s.adminsession,"ireg -KCR "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtesta" ) # mcoll test assertiCmd(s.adminsession,"imcoll -m link "+irodshome+"/icmdtesta "+irodshome+"/icmdtestb" ) assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestb", "LIST", "icmdtestb" ) if os.path.exists(dir_w+"/testb"): shutil.rmtree( dir_w+"/testb" ) assertiCmd(s.adminsession,"iget -fvrK "+irodshome+"/icmdtestb "+dir_w+"/testb", "LIST", "testb" ) output = commands.getstatusoutput("diff -r "+mysdir+" "+dir_w+"/testb" ) print "output is ["+str(output)+"]" assert output[0] == 0 assert output[1] == "", "diff output was not empty..." assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestb" ) assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestb" ) shutil.rmtree( dir_w+"/testb" ) assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestm" ) assertiCmd(s.adminsession,"imcoll -m filesystem -R "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtestm" ) assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestm/testmm" ) assertiCmd(s.adminsession,"iput "+progname+" "+irodshome+"/icmdtestm/testmm/foo1" ) assertiCmd(s.adminsession,"iput "+progname+" "+irodshome+"/icmdtestm/testmm/foo11" ) assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm/foo1 "+irodshome+"/icmdtestm/testmm/foo2" ) assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm "+irodshome+"/icmdtestm/testmm1" ) # mv to normal collection assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm1/foo2 "+irodshome+"/icmdtest/foo100" ) assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest/foo100", "LIST", "foo100" ) assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestm/testmm1 "+irodshome+"/icmdtest/testmm1" ) assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtest/testmm1", "LIST", "foo11" ) assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtest/testmm1 "+irodshome+"/icmdtest/foo100" ) if os.path.exists(dir_w+"/testm"): shutil.rmtree( dir_w+"/testm" ) assertiCmd(s.adminsession,"iget -fvrK "+irodshome+"/icmdtesta "+dir_w+"/testm", "LIST", "testm") output = commands.getstatusoutput("diff -r "+mysdir+" "+dir_w+"/testm" ) print "output is ["+str(output)+"]" assert output[0] == 0 assert output[1] == "", "diff output was not empty..." 
assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestm" ) assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestm" ) shutil.rmtree( dir_w+"/testm" ) assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_mcol" ) assertiCmd(s.adminsession,"ibun -c "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtest" ) # added so icmdtestx.tar exists assertiCmd(s.adminsession,"imcoll -m tar "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtestt_mcol" ) assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestt_mcol", "LIST", ["foo2"] ) assertiCmd(s.adminsession,"ils -lr "+irodshome+"/icmdtestt_mcol", "LIST", ["foo1"] ) if os.path.exists(dir_w+"/testt"): shutil.rmtree( dir_w+"/testt" ) if os.path.exists(dir_w+"/testx"): shutil.rmtree( dir_w+"/testx" ) assertiCmd(s.adminsession,"iget -vr "+irodshome+"/icmdtest "+dir_w+"/testx", "LIST", "testx" ) assertiCmd(s.adminsession,"iget -vr "+irodshome+"/icmdtestt_mcol/icmdtest "+dir_w+"/testt", "LIST", "testt" ) output = commands.getstatusoutput("diff -r "+dir_w+"/testx "+dir_w+"/testt" ) print "output is ["+str(output)+"]" assert output[0] == 0 assert output[1] == "", "diff output was not empty..." assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_mcol/mydirtt" ) assertiCmd(s.adminsession,"iput "+progname+" "+irodshome+"/icmdtestt_mcol/mydirtt/foo1mt" ) assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtestt_mcol/mydirtt/foo1mt "+irodshome+"/icmdtestt_mcol/mydirtt/foo1mtx" ) # unlink assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestt_mcol" ) # cleanup os.unlink( sfile2 ) shutil.rmtree( dir_w+"/testt" ) shutil.rmtree( dir_w+"/testx" ) if os.path.exists( mysdir ): shutil.rmtree( mysdir ) def test_large_dir_and_mcoll_from_devtest(self): # build expected variables with similar devtest names progname = __file__ myssize = str(os.stat(progname).st_size) username = s.adminsession.getUserName() irodszone = s.adminsession.getZoneName() testuser1 = s.sessions[1].getUserName() irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId irodsdefresource = s.adminsession.getDefResource() dir_w = "." 
sfile2 = dir_w+"/sfile2" commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 ) mysdir = "/tmp/irodssdir" myldir = dir_w+"/ldir" if os.path.exists( myldir ): shutil.rmtree( myldir ) assertiCmd(s.adminsession,"imkdir icmdtest") # we put foo1 in $irodsdefresource and foo2 in testresource assertiCmd(s.adminsession,"iput -K --wlock "+progname+" "+irodshome+"/icmdtest/foo1" ) assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest/foo1 "+irodshome+"/icmdtest/foo2" ) assertiCmd(s.adminsession,"ibun -c "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtest" ) # added so icmdtestx.tar exists assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_large" ) assertiCmd(s.adminsession,"imcoll -m tar "+irodshome+"/icmdtestx.tar "+irodshome+"/icmdtestt_large" ) assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtestt_large/mydirtt" ) # make a directory of 2 large files and 2 small files lfile = dir_w+"/lfile" lfile1 = dir_w+"/lfile1" commands.getstatusoutput( "echo 012345678901234567890123456789012345678901234567890123456789012 > "+lfile ) for i in range(6): commands.getstatusoutput( "cat "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" "+lfile+" > "+lfile1 ) os.rename ( lfile1, lfile ) os.mkdir( myldir ) for i in range(1,3): mylfile = myldir+"/lfile"+str(i) mysfile = myldir+"/sfile"+str(i) if i != 2: shutil.copyfile( lfile, mylfile ) else: os.rename( lfile, mylfile ) shutil.copyfile( progname, mysfile ) # test adding a large file to a mounted collection assertiCmd(s.adminsession,"iput "+myldir+"/lfile1 "+irodshome+"/icmdtestt_large/mydirtt" ) assertiCmd(s.adminsession,"iget "+irodshome+"/icmdtestt_large/mydirtt/lfile1 "+dir_w+"/testt" ) assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestt_large/mydirtt" ) assertiCmd(s.adminsession,"imcoll -s "+irodshome+"/icmdtestt_large" ) assertiCmd(s.adminsession,"imcoll -p "+irodshome+"/icmdtestt_large" ) assertiCmd(s.adminsession,"imcoll -U "+irodshome+"/icmdtestt_large" ) assertiCmd(s.adminsession,"irm -rf "+irodshome+"/icmdtestt_large" ) os.unlink( dir_w+"/testt" ) # cleanup os.unlink( sfile2 ) if os.path.exists( myldir ): shutil.rmtree( myldir ) def test_phybun_from_devtest(self): # build expected variables with similar devtest names progname = __file__ myssize = str(os.stat(progname).st_size) username = s.adminsession.getUserName() irodszone = s.adminsession.getZoneName() testuser1 = s.sessions[1].getUserName() irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId irodsdefresource = s.adminsession.getDefResource() dir_w = "." 
sfile2 = dir_w+"/sfile2" commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 ) mysdir = "/tmp/irodssdir" myldir = dir_w+"/ldir" if os.path.exists( myldir ): shutil.rmtree( myldir ) assertiCmd(s.adminsession,"imkdir icmdtest") # make a directory containing 20 small files if not os.path.isdir(mysdir): os.mkdir(mysdir) for i in range(20): mysfile = mysdir+"/sfile"+str(i) shutil.copyfile( progname, mysfile ) # iphybun test assertiCmd(s.adminsession,"iput -rR "+self.testresc+" "+mysdir+" "+irodshome+"/icmdtestp" ) assertiCmd(s.adminsession,"iphybun -KR "+self.anotherresc+" "+irodshome+"/icmdtestp" ) assertiCmd(s.adminsession,"itrim -rS "+self.testresc+" -N1 "+irodshome+"/icmdtestp", "LIST", "files trimmed" ) output = commands.getstatusoutput( "ils -L "+irodshome+"/icmdtestp/sfile1 | tail -n1 | awk '{ print $NF }'") print output[1] bunfile = output[1] assertiCmd(s.adminsession,"irepl --purgec -R "+self.anotherresc+" "+bunfile ) assertiCmd(s.adminsession,"itrim -rS "+self.testresc+" -N1 "+irodshome+"/icmdtestp", "LIST", "files trimmed" ) # get the name of bundle file assertiCmd(s.adminsession,"irm -f --empty "+bunfile ) # should not be able to remove it because it is not empty assertiCmd(s.adminsession,"ils "+bunfile, "LIST", bunfile ) assertiCmd(s.adminsession,"irm -rvf "+irodshome+"/icmdtestp", "LIST", "num files done" ) assertiCmd(s.adminsession,"irm -f --empty "+bunfile ) if os.path.exists(dir_w+"/testp"): shutil.rmtree( dir_w+"/testp" ) shutil.rmtree( mysdir ) # cleanup os.unlink( sfile2 ) if os.path.exists( myldir ): shutil.rmtree( myldir ) if os.path.exists( mysdir ): shutil.rmtree( mysdir ) def test_irsync_from_devtest(self): # build expected variables with similar devtest names progname = __file__ myssize = str(os.stat(progname).st_size) username = s.adminsession.getUserName() irodszone = s.adminsession.getZoneName() testuser1 = s.sessions[1].getUserName() irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId irodsdefresource = s.adminsession.getDefResource() dir_w = "." sfile2 = dir_w+"/sfile2" commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 ) mysdir = "/tmp/irodssdir" myldir = dir_w+"/ldir" if os.path.exists( myldir ): shutil.rmtree( myldir ) assertiCmd(s.adminsession,"imkdir icmdtest") # testing irsync assertiCmd(s.adminsession,"irsync "+progname+" i:"+irodshome+"/icmdtest/foo100" ) assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest/foo100 "+dir_w+"/foo100" ) assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest/foo100 i:"+irodshome+"/icmdtest/foo200" ) assertiCmd(s.adminsession,"irm -f "+irodshome+"/icmdtest/foo100 "+irodshome+"/icmdtest/foo200") assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+progname+" "+irodshome+"/icmdtest/foo100") assertiCmd(s.adminsession,"irsync "+progname+" i:"+irodshome+"/icmdtest/foo100" ) assertiCmd(s.adminsession,"iput -R "+self.testresc+" "+progname+" "+irodshome+"/icmdtest/foo200") assertiCmd(s.adminsession,"irsync i:"+irodshome+"/icmdtest/foo100 i:"+irodshome+"/icmdtest/foo200" ) os.unlink( dir_w+"/foo100" ) # cleanup os.unlink( sfile2 ) def test_xml_protocol_from_devtest(self): # build expected variables with similar devtest names progname = __file__ myssize = str(os.stat(progname).st_size) username = s.adminsession.getUserName() irodszone = s.adminsession.getZoneName() testuser1 = s.sessions[1].getUserName() irodshome = "/"+irodszone+"/home/rods/"+s.adminsession.sessionId irodsdefresource = s.adminsession.getDefResource() dir_w = "." 
sfile2 = dir_w+"/sfile2" commands.getstatusoutput( "cat "+progname+" "+progname+" > "+sfile2 ) mysdir = "/tmp/irodssdir" myldir = dir_w+"/ldir" if os.path.exists( myldir ): shutil.rmtree( myldir ) assertiCmd(s.adminsession,"imkdir icmdtest") lrsfile = dir_w+"/lrsfile" rsfile = dir_w+"/rsfile" # do test using xml protocol os.environ['irodsProt'] = "1" assertiCmd(s.adminsession,"ilsresc", "LIST", self.testresc ) assertiCmd(s.adminsession,"imiscsvrinfo", "LIST", "relVersion" ) assertiCmd(s.adminsession,"iuserinfo", "LIST", "name: "+username ) assertiCmd(s.adminsession,"ienv", "LIST", "Release Version" ) assertiCmd(s.adminsession,"icd "+irodshome ) assertiCmd(s.adminsession,"ipwd", "LIST", "home" ) assertiCmd(s.adminsession,"ihelp ils", "LIST", "ils" ) assertiCmd(s.adminsession,"ierror -14000", "LIST", "SYS_API_INPUT_ERR" ) assertiCmd(s.adminsession,"iexecmd hello", "LIST", "Hello world" ) assertiCmd(s.adminsession,"ips -v", "LIST", "ips" ) assertiCmd(s.adminsession,"iqstat", "LIST", "No delayed rules" ) assertiCmd(s.adminsession,"imkdir "+irodshome+"/icmdtest1" ) # make a directory of large files assertiCmd(s.adminsession,"iput -kf "+progname+" "+irodshome+"/icmdtest1/foo1" ) assertiCmd(s.adminsession,"ils -l "+irodshome+"/icmdtest1/foo1", "LIST", ["foo1", myssize] ) assertiCmd(s.adminsession,"iadmin ls "+irodshome+"/icmdtest1", "LIST", "foo1" ) assertiCmd(s.adminsession,"ichmod read "+s.sessions[1].getUserName()+" "+irodshome+"/icmdtest1/foo1" ) assertiCmd(s.adminsession,"ils -A "+irodshome+"/icmdtest1/foo1", "LIST", s.sessions[1].getUserName()+"#"+irodszone+":read" ) assertiCmd(s.adminsession,"irepl -B -R "+self.testresc+" "+irodshome+"/icmdtest1/foo1" ) # overwrite a copy assertiCmd(s.adminsession,"itrim -S "+irodsdefresource+" -N1 "+irodshome+"/icmdtest1/foo1" ) assertiCmd(s.adminsession,"iphymv -R "+irodsdefresource+" "+irodshome+"/icmdtest1/foo1" ) assertiCmd(s.adminsession,"imeta add -d "+irodshome+"/icmdtest1/foo1 testmeta1 180 cm" ) assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest1/foo1", "LIST", "testmeta1" ) assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest1/foo1", "LIST", "180" ) assertiCmd(s.adminsession,"imeta ls -d "+irodshome+"/icmdtest1/foo1", "LIST", "cm" ) assertiCmd(s.adminsession,"icp -K -R "+self.testresc+" "+irodshome+"/icmdtest1/foo1 "+irodshome+"/icmdtest1/foo2" ) assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtest1/foo2 "+irodshome+"/icmdtest1/foo4" ) assertiCmd(s.adminsession,"imv "+irodshome+"/icmdtest1/foo4 "+irodshome+"/icmdtest1/foo2" ) assertiCmd(s.adminsession,"ichksum -K "+irodshome+"/icmdtest1/foo2", "LIST",
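
# --- Illustrative sketch (not part of the test suite above) ---
# The tests above are Python 2 code built on the `commands` module. On Python 3
# the same (status, output) pattern is provided by subprocess; a minimal
# equivalent of the calls used throughout:
import subprocess

def getstatusoutput(cmd):
    # subprocess.getstatusoutput returns (exit_status, captured_output),
    # matching how the tests unpack output[0] and output[1].
    return subprocess.getstatusoutput(cmd)

# e.g. status, out = getstatusoutput("diff -r dirA dirB"); assert status == 0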
#remove outliers col_rm_outlier=remove_outlier(col) #adjust max_bin for improving performance if pd.unique(col_rm_outlier).size<max_bin: max_bin_adj=pd.unique(col_rm_outlier).size else: max_bin_adj=max_bin #R pretty bins:cut points looks better but will lose iv or ks gain cuts_remain=R_pretty(np.nanmin(col_rm_outlier),np.nanmax(col_rm_outlier),max_bin_adj) #equal freqs #cuts_remain = np.unique(np.nanpercentile(col_rm_outlier,np.linspace(0, 1, max_bin + 1)[1:-1] * 100, interpolation='lower')) #equal width #_,cuts_remain = np.histogram(col_rm_outlier,max_bin,weights=ws)#histogram bins cuts_tree=[];inds=[];best_criteria=[0] iters=0 # tree cut while True: iters=iters+1 inds=[];c_dis_d=[] for point in cuts_remain.copy(): interval=sorted(np.unique([-np.inf]+[point]+cuts_tree+[np.inf])) col_group=np.digitize(col,interval,right=False) #if point that could not split data(edge point),then pass the iteration if sum(~np.isin(np.arange(start=1,stop=col_group.max()+1),np.unique(col_group)))>0: cuts_remain=cuts_remain[cuts_remain!=point] else: #split data and count the good/bad g=np.zeros(0) b=np.zeros(0) count_g=np.zeros(0) for i in np.unique(col_group): y_g=y[col_group==i] unit_g=count[col_group==i] count_g=np.append(count_g,np.sum(unit_g)) b=np.append(b,np.sum(y_g)) g=np.append(g,np.sum(unit_g) - np.sum(y_g)) bad_prob=b/count_g # count_g=pd.Series(count).groupby(col_group).sum().ravel() # b=pd.Series(y).groupby(col_group).sum().ravel() # g=count_g-b # interim=pd.DataFrame({'count':count,'y':y}).groupby(col_group).sum() # count_g=interim['count'].ravel() # b=interim['y'].ravel() # g=count_g-b #if no good or bad sample in cut of the point,then pass the iteration if 0 in b.tolist() or 0 in g.tolist(): cuts_remain=cuts_remain[cuts_remain!=point] #if cuts_tree with current point shows no monotonic trend,then pass the iteration(no nans in col) elif coerce_monotonic and (not is_str_dtype) and (not is_monotonic(bad_prob)) and (not nan_sum): cuts_remain=cuts_remain[cuts_remain!=point] #if cuts_tree with current point shows no monotonic trend,then pass the iteration(nans in col) elif coerce_monotonic and (not is_str_dtype) and (not is_monotonic(bad_prob[:-1])) and (nan_sum): cuts_remain=cuts_remain[cuts_remain!=point] #else get criteria calculated else: g_dis=g/g.sum() b_dis=b/b.sum() g_dis_cs=np.cumsum(g_dis) b_dis_cs=np.cumsum(b_dis) # nan distr will not be calculated if nan_sum>0: c_dis=(count_g/count_g.sum())[:-1] else: c_dis=count_g/count_g.sum() c_dis_d.append(c_dis) # iv&ks supported,more criterias will be added in future if criteria=='iv': ind=((b_dis-g_dis)*np.log((b_dis+1e-10)/(g_dis+1e-10))).sum() #print(cuts_tree,ind) elif criteria=='ks': ind=np.max(np.abs(g_dis_cs-b_dis_cs)) else: raise ValueError("criteria in 'iv' or 'ks'") inds.append(ind) # stop condition(or): #. len(cuts_remain)==0, #. beyond max_iters, #. criteria_gain stop growing, #. count_distr below limit #. 
#    bin num beyond limit
        if not cuts_remain.size:
            #print("len(cuts_remain)==0")
            break
        else:
            # get best point and update params
            best_point = cuts_remain[np.argmax(inds)]  # best split point
            best_point_dist_min = c_dis_d[np.argmax(inds)].min()
            best_criteria.append(np.max(inds))  # score at best split point
            cuts_remain = cuts_remain[cuts_remain != best_point]  # pop best_point out of the original cuts

            # calculate best_criteria_gain
            best_criteria_gain = (best_criteria[-1] - best_criteria[-2]) / best_criteria[-2] if best_criteria[-2] else 1
            cuts_tree.append(best_point)

            # remove cut point with lower count_distr
            if best_point_dist_min < distr_limit:
                cuts_tree.remove(best_point)
                best_criteria = best_criteria[:-1]

            bin_num = len(cuts_tree) + 1  # bin num
            if iters > max_iters:
                #print("max_iters reach")
                break
            if best_criteria_gain < tol:
                #print("best_criteria_gain reach")
                break
            if bin_num >= bin_num_limit:
                #print("bin_num_limit reach")
                break

    return sorted(cuts_tree)


class binChi2(Base, Specials, BaseEstimator):
    """
    Automatic chi-square (ChiMerge) binning: merge bins with low chi-square values
    and adjust per-bin sample counts until the number of bins reaches the
    user-defined level.
    [ChiMerge: Discretization of numeric attributes](http://www.aaai.org/Papers/AAAI/1992/AAAI92-019.pdf)

    Categorical features: levels are sorted by bad rate and ordinal-encoded before
    chi-square binning (consistent with scorecardpy).
    Empty-string levels ('' or "") must not appear among the categories.

    Params:
    ------
    max_bin=50, number of initial bins.
        + Pre-binning points come from "pretty breakpoints" (see R's pretty function).
        + Outliers and edge points are removed.
        + More initial bins make better chi-square cut points more likely, at higher
          computational cost. max_bin=50 matches scorecardpy.
    tol=0.1, chi-square significance threshold for merging bins; at tol=0.1 the
        chi-square threshold with one degree of freedom is about 2.70554345409542.
        + A higher tol merges fewer bins; a lower tol merges more.
        + 0.1, 0.05 and 0.01 are the theoretically recommended thresholds; in
          practice set as needed.
    distr_limit=0.05, minimum sample proportion per bin.
    bin_num_limit=8, upper limit on the number of bins; the actual number of bins
        is at most bin_num_limit.
    coerce_monotonic=False, whether to force bad_prob to be monotonic (default False).
        + The algorithm first enforces monotonicity on the pre-binning, then
          performs chi-square binning, so the chi-square bins stay monotonic.
        + If x and y are inherently monotonic this gives good results; if their
          relationship is nonlinear, forcing monotonicity gives poor results.
    ws=None, None or pandas.core.series.Series, sample weights.
    special_values, placeholder values to treat specially when certain values
        (other than np.nan) in some or all columns need special handling.
        + None, keep the data as-is.
        + list=[value1, value2, ...], values in the list are replaced in every
          column; strings are replaced by 'missing' and numbers by np.nan.
        + dict={col_name1: [value1, value2, ...], ...}, replacement restricted to
          the named columns; strings are replaced by 'missing' and numbers by np.nan.
    n_jobs=-1, int, number of parallel jobs (default -1); for large data with many
        columns this greatly improves speed at the cost of extra memory.
    verbose=0, verbosity level of the parallel backend.

    Attributes:
    -------
    breaks_list: dict of the produced break points.
    bins: dict of per-feature analysis reports under the current breaks_list.
    """

    def __init__(self, max_bin=50, tol=0.1, distr_limit=0.05, bin_num_limit=8,
                 coerce_monotonic=False, ws=None, special_values=None, n_jobs=-1, verbose=0):
        self.max_bin = max_bin
        self.tol = tol
        self.distr_limit = distr_limit
        self.bin_num_limit = bin_num_limit
        self.ws = ws
        self.coerce_monotonic = coerce_monotonic
        self.special_values = special_values
        self.n_jobs = n_jobs
        self.verbose = verbose

    def fit(self, X, y):
        self._check_data(X, y)

        n_jobs = effective_n_jobs(self.n_jobs)
        p = Parallel(n_jobs=n_jobs, verbose=self.verbose)
        res = p(delayed(self._get_chi2merge)(col[1], y,
                                             self.max_bin,
                                             self.tol,
                                             self.distr_limit,
                                             self.bin_num_limit,
                                             self.ws,
                                             self.coerce_monotonic,
                                             self.special_values) for col in X.iteritems())
        self.breaks_list = {col_name: breaks for col_name, breaks, _ in res}
        self.bins = {col_name: vtab for col_name, _, vtab in res}
        return self

    def transform(self, X):
        return X

    def _get_chi2merge(self, col, y, max_bin=50, tol=0.1, distr_limit=0.05,
                       bin_num_limit=8, ws=None, coerce_monotonic=False, special_values=None):
        col_raw = col.copy()
        col = self._sp_replace_single(col, self._check_spvalues(col.name, special_values),
                                      fill_num=np.nan, fill_str='special')

        # sample weight
        if is_array_like(ws):
            ws = ws.values
            if ws.size != y.size:
                raise ValueError('length of weight not equal to y')
        else:
            ws = np.ones(y.size)

        # numeric columns
        if is_numeric_dtype(col):
            #print(col.name)
            # no merge applied when only one unique value exists in col
            if np.isnan(col).all():
                breaks = []
elif np.unique(col[~np.isnan(col)]).size==1: breaks=[] else: #chi2_merge breaks=self._chi2_merge(col.values,y.values,ws=ws,max_bin=max_bin, distr_limit=distr_limit,stop_limit=tol, bin_num_limit=bin_num_limit,is_str_dtype=False, coerce_monotonic=coerce_monotonic) #get vtab using chi2-breaks vtab=varReportSinge().report(col_raw,y,breaks,sample_weight=ws,special_values=special_values) elif is_string_dtype(col): #print(col.name) #no merge applied when only one unique value existed in col if np.unique(col).size==1: breaks=[] else: #sort levels by bad_rate(no-wieght) codes=y.groupby(col).mean().sort_values().index.tolist() #ordinal encode data start with 0 map_code=dict(zip(codes,list(range(len(codes))))) #chi2_merge breaks_raw=self._chi2_merge(col.map(map_code).values,y.values,ws=ws, distr_limit=distr_limit,stop_limit=tol, bin_num_limit=bin_num_limit, is_str_dtype=True) #restore string breaks breaks=['%,%'.join(i) for i in np.split(codes,np.int32(breaks_raw)) if i.tolist()] #get vtab using chi2-breaks vtab=varReportSinge().report(col_raw,y,breaks,sample_weight=ws,special_values=special_values) else: raise ValueError("col's dtype in ('number','object')") return col.name,breaks,vtab def _chi2_merge(self,col,y,ws,max_bin=50,distr_limit=0.05,bin_num_limit=8,stop_limit=0.1, is_str_dtype=False,coerce_monotonic=False): if max_bin<2: raise ValueError('max_bin should greater than 1') #get count limit per bin count_limit=distr_limit*ws.sum() #get chi2 threshold at stop_limit threshold = chi2.isf(stop_limit, df=1) #drop nans if is_str_dtype: cuts=np.sort(np.unique(col))[1:] else: #get initial-binning y=y[~np.isnan(col)] ws=ws[~np.isnan(col)] if ws.size!=y.size: raise ValueError('length of weight not equal to y') col=col[~np.isnan(col)] #drop outtliers col_rm_outtlier=remove_outlier(col) #adjust max_bin if np.unique(col_rm_outtlier).size<max_bin: max_bin_adj=np.unique(col_rm_outtlier).size else: max_bin_adj=max_bin #get pretty cuts cuts=R_pretty(np.nanmin(col_rm_outtlier),np.nanmax(col_rm_outtlier),max_bin_adj) #remove edge point in initial-binning cuts=rm_edge_point(col,cuts) if coerce_monotonic: #remove points to make monotonic bad_prob if coerce_monotonic==True cuts=check_monotonirc(col,y,cuts,ws=ws) #calculate chi2 value using initial-binning _,chi2_d,count_list,cuts_bin=self._chi2_bin(col,y,cuts,ws) cuts=np.array([i[0] if i[0]!= -np.inf else i[1] for i in cuts_bin]) #pop points out of initial-binning while True: # #for debug:check length of idx,cuts,count,chi2 # if len(idx) and len(cuts) and len(count_list) and len(chi2_d) and not ((len(idx)+1)==len(cuts)==len(count_list)==len(chi2_d)): # print("len(idx)=={}".format(str(len(idx)))) # print("len(cuts)=={}".format(str(len(cuts)))) # print("len(count_list)=={}".format(str(len(count_list)))) # print("len(chi2_d)=={}".format(str(len(chi2_d)))) # raise ValueError('not (len(idx)==len(cuts)==len(count_list)==len(chi2_d))') if len(cuts)==0 or len(count_list)==0 or len(chi2_d)==0: cuts=np.array([]) break #remove cut point with lowest chi2 value elif chi2_d.min()<threshold: #if string col‘s cuts can not make all bins' chi2 lower than threshold,then merge all bins if is_str_dtype and np.unique(cuts).size==1: cuts=np.array([]) else: #print('point {} out due to chi2'.format(str(cuts[np.argmin(chi2_d)]))) cuts=cuts[cuts!=cuts[np.argmin(chi2_d)]] _,chi2_d,count_list,cuts_bin=self._chi2_bin(col,y,cuts,ws) cuts=np.array([i[0] if i[0]!= -np.inf else i[1] for i in cuts_bin]) #remove cut point with bin count less than user-defined elif count_list.min()<count_limit: #if 
string col‘s cuts can not make all bins' distr lower than count_limit,then merge all bins if is_str_dtype and np.unique(cuts).size==1: cuts=np.array([]) else: #print('point {} out due to count_limit'.format(str(cuts[np.argmin(count_list)]))) cuts=cuts[cuts!=cuts[np.argmin(count_list)]] _,chi2_d,count_list,cuts_bin=self._chi2_bin(col,y,cuts,ws) cuts=np.array([i[0] if i[0]!= -np.inf else i[1] for i in cuts_bin]) #remove cut point with lowest chi2 value when bin_num higher than user-defined elif len(cuts)>bin_num_limit: #print('point {} out due to bin_num_limit'.format(str(cuts[np.argmin(chi2_d)]))) cuts=cuts[cuts!=cuts[np.argmin(chi2_d)]] _,chi2_d,count_list,cuts_bin=self._chi2_bin(col,y,cuts,ws) cuts=np.array([i[0] if i[0]!= -np.inf else i[1] for i in cuts_bin]) #else break the loop else: break return np.unique(cuts).tolist() def _chi2_bin(self,col,y,cut_off,ws=None): if not np.equal(np.unique(y),np.array([0.,1.])).all(): raise ValueError('y values only in (0.,1.)') # update cut_group cut_off=np.sort(np.unique(cut_off)) cut_off_all=[-np.inf]+cut_off.tolist()+[np.inf] cut_g=np.digitize(col,cut_off_all,right=False) cut_bin=np.array([cut_off_all[i:i + 2] for i in range(len(cut_off_all) - 1)]) #sample weights if is_array_like(ws): ws=ws if ws.size!=y.size: raise ValueError('length of weight not equal to y') else: ws=np.ones(y.size) gid=np.unique(cut_g) chi2_d=[1e3] count_list=[] idx=[] for i in range(len(gid)-1): idx.append(i) y_g_1=y[cut_g==gid[i]] ws_g_1=ws[cut_g==gid[i]] y_g_2=y[cut_g==gid[i+1]] ws_g_2=ws[cut_g==gid[i+1]] # if all vals in y groupby col equal to 0 or 1 then chi2==0 if (np.append(y_g_1,y_g_2)==0).all() or (np.append(y_g_1,y_g_2)==1).all(): xtab=np.array([[ws_g_1[y_g_1==0].sum(),ws_g_2[y_g_2==0].sum()], [ws_g_1[y_g_1==1].sum(),ws_g_2[y_g_2==1].sum()]]) #merge 1st point in bin when distr too low if i==0: count_list.append(xtab[:,0].sum()) count_list.append(xtab[:,1].sum()) #all vals in y groupby col equal to 0 or 1 then chi2==0 chi2_d.append(0.) 
            else:
                # y-values only in (0, 1)
                xtab = np.array([[ws_g_1[y_g_1 == 0].sum(), ws_g_2[y_g_2 == 0].sum()],
                                 [ws_g_1[y_g_1 == 1].sum(), ws_g_2[y_g_2 == 1].sum()]])

                # merge 1st point in bin when distr too low
                if i == 0:
                    count_list.append(xtab[:, 0].sum())
                count_list.append(xtab[:, 1].sum())

                # calculate chi2 using scipy
                ccsq, _, _, _, = chi2_contingency(xtab, correction=False)
                chi2_d.append(ccsq)

        return np.array(idx), np.array(chi2_d), np.array(count_list), np.array(cut_bin)


class binPretty(Base, Specials, BaseEstimator):
    """
    Pretty binning: use pretty cuts as the pre-binning, then merge bins until they
    reach the user-defined level.

    Categorical features: levels are sorted by bad rate and ordinal-encoded before
    binning. Empty-string levels ('' or "") must not appear among the categories.

    Params:
    ------
    max_bin=50, number of initial bins.
        + Pre-binning points come from "pretty breakpoints" (see R's pretty function).
        + Outliers and edge points are removed.
        + More initial bins make better cut points more likely, at higher
          computational cost. max_bin=50 matches scorecardpy.
    distr_limit=0.05, minimum sample proportion per bin.
    bin_num_limit=8, upper limit on the number of bins; the actual number of bins
        is at most bin_num_limit.
    coerce_monotonic=False, whether to force bad_prob to be monotonic (default False).
        + The algorithm first enforces monotonicity on the pre-binning, then merges
          bins so the final binning stays monotonic.
        + If x and y are inherently monotonic this gives good results; if their
          relationship is nonlinear, forcing monotonicity gives poor results.
    ws=None, None or pandas.core.series.Series, sample weights.
    special_values, placeholder values to treat specially when certain values
        (other than np.nan) in some or all columns need special handling.
        + None, keep the data as-is.
        + list=[value1, value2, ...], values in the list are replaced in every
          column; strings are replaced by 'missing' and numbers by np.nan.
        + dict={col_name1: [value1, value2, ...], ...}, replacement restricted to
          the named columns; strings are replaced by 'missing' and numbers by np.nan.
    n_jobs=-1, int, number of parallel jobs (default -1); for large data with many
        columns this greatly improves speed at the cost of extra memory.
    verbose=0, verbosity level of the parallel backend.

    Attributes:
    -------
    breaks_list: dict of the produced break points.
    bins: dict of per-feature analysis reports under the current breaks_list.
    """

    def __init__(self, max_bin=50, distr_limit=0.05, bin_num_limit=8,
                 coerce_monotonic=False, ws=None, special_values=None, n_jobs=-1, verbose=0):
        self.max_bin = max_bin
        self.distr_limit = distr_limit
        self.bin_num_limit = bin_num_limit
        self.ws = ws
        self.coerce_monotonic = coerce_monotonic
        self.special_values = special_values
        self.n_jobs = n_jobs
        self.verbose = verbose

    def fit(self, X, y):
        self._check_data(X, y)

        n_jobs = effective_n_jobs(self.n_jobs)
        p = Parallel(n_jobs=n_jobs, verbose=self.verbose)
        res = p(delayed(self._get_prettymerge)(col[1], y,
                                               self.max_bin,
                                               self.distr_limit,
                                               self.bin_num_limit,
                                               self.ws,
                                               self.coerce_monotonic,
                                               self.special_values) for col in X.iteritems())
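
# --- Illustrative sketch (not part of the module above) ---
# A minimal, hedged demonstration of the merge test used by _chi2_bin: build a
# 2x2 good/bad contingency table for two adjacent bins and compare its chi-square
# statistic against the threshold chi2.isf(stop_limit, df=1). A low statistic
# means the two bins have similar bad rates and are candidates for merging.
# The counts below are made up for the demo.
from scipy.stats import chi2, chi2_contingency
import numpy as np

xtab = np.array([[80., 75.],    # goods in bin i, bin i+1
                 [20., 25.]])   # bads in bin i, bin i+1
stat, _, _, _ = chi2_contingency(xtab, correction=False)
threshold = chi2.isf(0.1, df=1)  # ~2.7055 at stop_limit=0.1
print(stat < threshold)          # True -> merge the two bins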
# coding: utf-8 # # Execute this notebook first, after execute the notebook track_improved # In[1]: from utils.definitions import ROOT_DIR filepath = ROOT_DIR+'/data/original/' # file path artist_csv = "artists.csv" track_csv = "tracks.csv" artist_improved_intermediate= "tracks_improved_intermediate.csv" # generated from previous notebook artist_improved_intermediate_dict = "artists_improved_intermediate.csv" # generated from previous notebook artist_improved_final = "tracks_improved.csv" artist_improved_final_dict = "artists_improved.csv" # In[2]: #import import pandas import numpy as np import scipy.sparse as sp import re import collections import tqdm import os # In[3]: # read data df = pandas.read_csv(filepath_or_buffer=filepath+artist_csv,sep="\t",header=0, usecols=['arid','artist_name','artist_uri'], dtype={'arid':np.int32,'artist_name':str, 'artist_uri':str}) df = df [['arid','artist_name','artist_uri']] df.head() # In[4]: #data originals = df['artist_name'].values artists = df['artist_name'].str.lower().values arids = df['arid'].values uris = df['artist_uri'].values #print(arids.shape[0]) # In[5]: # split in main and co-artists (1st level) def reg(vect): exp = '' for s in vect: exp += s + '|' exp = exp [:-1] return exp def split_main_co_artists(value, reg): values = re.split(reg,str(value)) l = len(values) main = [] co = [] if l == 1: main.append(values[0]) elif l == 2: main.append(values[0]) co.append(values [1]) else: main.append(values [0]) for i in range(1,l): co.append(values[i]) return main, co #regex s = [] s.append('\s[\(\[]?featuring[\.\:\.\,]?\s') s.append('\s[\(\[]?featurin[\.\:\.\,]?\s') s.append('\s[\(\[]?featured[\.\:\.\,]?\s') s.append('\s[\(\[]?starring[\.\:\.\,]?\s') s.append('\s[\(\[]?feat[\.\:\.\,]?\s') s.append('\s[\(\[]?ft[\.\:\.\,]?\s') s.append('\s[\(\[]?aka[\.\:\.\,]?\s') s.append('\s[\(\[]?-[\.\:\.\,]?\s') s.append('\s[\(\[]?introducing[\.\:\.\,]?\s') s.append('\s[\(\[]?presents[\.\:\.\,]?\s') s.append('\s[\(\[]?present[\.\:\.\,]?\s') s.append('\s[\(\[]?duet\swith[\.\:\.\,]?\s') s.append('\s[\(\[]?with[\.\:\.\,]?\s') s.append('\sw\/\s') s.append('\sf\/\s') s.append('\s?\/\s?') s.append('\s?\,\s\&\s?') s.append('\smeets?\s') s.append('\sand\shis\s') s.append('\sand\sher\s') s.append('\sand\sthem\s') s.append('\s\&\shis\s') s.append('\s\&\sher\s') s.append('\s\&\sthem\s') s.append('\s\&amp\;?\s') s.append('[(|)]') s.append('[\[|\]]') s.append('[\{|\}]') #spanish cases s.append('\scon\sla\s') s.append('\sy\ssus?\s') s.append('\sy\slos?\s') s.append('\spresenta\:?\s') s.append('\scon\s') s.append('\shaz\s') #other lang s.append('\smit\s') s.append('\savec\s') s.append('perf\.\s') s.append('\slyr\.\s') s.append('\sdir\.\s') #special cases s.append('\sfrom\:\s') s.append('\sed\.\s') s.append('\s?members\sof\sthe\s') s.append('\s?members?\sof\s') s.append('\svol\.?\s') s.append('\s_\s') s.append('performed\sby\s') s.append('\spresents') s.append('\s\'presents\'') s.append('\spresents...') s.append('\spresents\:') s.append('\sfeaturng\s') s.append('\sfeat\,') s.append('[\(\[]feat[\.\:\.\,]') s.append('feat\.') reg_main_co_artists = reg(s) c=0 main_a = [] co_a = [] for a in artists: main, co = split_main_co_artists(a,reg_main_co_artists) main_a.append(main) co_a.append(co) if len(co) + len(main) > 1: c += 1 if(len(main_a) != len(co_a)): print("ERROR") else: pass #print("DONE, found %d instances"%(c)) # In[6]: #split artists 2nd level (split main artists and after split the co-artists ) def split_artists(value, reg): artists = re.split(reg,str(value)) return artists 
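# --- Illustrative sketch (not part of the original notebook) ---
# Hedged example of the 1st-level split defined above (see the regex list that
# follows for the 2nd level): the alternation built by reg() feeds re.split,
# separating the main artist from featured co-artists. The artist string is
# made up for the demo.
_demo_main, _demo_co = split_main_co_artists(
    "artist one feat. artist two", reg_main_co_artists)
# _demo_main -> ['artist one'], _demo_co -> ['artist two']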
#regex s = [] s.append('\sand\s') s.append('\svs\.?') s.append('\s?\-?conducted\sby\s') s.append('\s?directed\sby\s') s.append('\s?arranged\sby\s') s.append('\sx\s') s.append('\s\&\sco\.') s.append('\s\&\s') s.append('\s?\;\s?') s.append('\s?\,\s?') s.append('\s?\+\s?') #spanish s.append('\sy\s') reg_split_artists = reg(s) main_a2 = [] co_a2 = [] # main artists c1 = 0 for l_a in main_a: new_l = [] for a in l_a: mains = split_artists(a,reg_split_artists) new_l = new_l + mains if len(mains)>1: #print (mains) c1 = c1 + 1 main_a2.append(new_l) # co-artists c2 = 0 for l_a in co_a: new_l = [] for a in l_a: co = split_artists(a,reg_split_artists) new_l = new_l + co if len(co)>1: #print (co) c2 = c2 + 1 co_a2.append(new_l) if(len(main_a2) != len(co_a2)): print("ERROR") else: pass #print("DONE, found %d instances (%d main artists, %d co-artists)"%(c1+c2,c1,c2)) # In[8]: # class artist (attributes and a couple of utility methods) class Artist: def __init__(self, original_artist, arid, uri, main_artists = [], co_artists = [], main_artists_ids = [], co_artists_ids = [], ): self.arid = arid self.original_artist = original_artist self.main = main_artists self.co = co_artists self.main_ids = main_artists_ids self.co_ids = co_artists_ids self.uri = uri self.clean_names() self.shif_co_if_main_empty() def clean_names(self): self.main = list(map(str.strip, self.main)) self.co = list(map(str.strip, self.co)) self.main = list(filter(lambda s: s!='', self.main)) self.co = list(filter(lambda s: s!='', self.co)) def shif_co_if_main_empty(self): #shift first co in main if main is empty (happens when a name of the artist start with parenthesis) if len(self.co) != 0 and len(self.main) ==0: self.main.append(self.co[0]) self.co = self.co[1:] # artist with no name, actually without filter single char happens just one time #if len(self.co) == 0 and len(self.main) == 0: #self.main.append('None') def reset_main_co_ids(self): self.main_ids = [] self.co_ids = [] # In[9]: # create the artist objects final_artists = [] for i in range (0,len(main_a2)): original = originals[i] main = main_a2[i] co = co_a2[i] uri = uris[i] arid = arids[i] final_artists.append(Artist(original,arid,uri,main,co)) # In[10]: # stat and search for attributes def print_info_artist(a): print ("original: \t%s"%(a.original_artist)) print ("main: \t\t%s"%(a.main)) print ("co: \t\t%s"%(a.co)) print ("main ids: \t%s"%(a.main_ids)) print ("co ids: \t%s"%(a.co_ids)) print ("id: \t\t%s"%(a.arid)) print ("uri: \t\t%s"%(a.uri)) return # In[11]: # build new ids for the artists def get_new_id(name): global count if name not in new_dict: new_dict[name] = count count += 1 return new_dict[name] new_dict = {} count = 0 for a in final_artists: a.reset_main_co_ids() for name in a.main: a.main_ids.append(get_new_id(name)) for name in a.co: a.co_ids.append(get_new_id(name)) #print ('new dictionary: %d artist'%(count)) # In[12]: #TODO: clean artist which name is a stop word # like: orquesta, orchestra, friends, karaoke, co., chorus, etc... 
(see the stats analysis at the end for more details)
# In[13]: # write the new data to a new csv
artist_fields = ['arid','artist_uri','main_ids','co_ids','artist_name']#,'main_names','co_names'] full = [] for a in final_artists: row = [] row.append(a.arid) row.append(a.uri) row.append(a.main_ids) row.append(a.co_ids) row.append(a.original_artist) #row.append(a.main) #row.append(a.co) full.append(row) import csv with open(filepath+artist_improved_intermediate, "w") as f: writer = csv.writer(f,delimiter = "\t",) writer.writerow(artist_fields) writer.writerows(full) print (artist_improved_intermediate +" created")
# In[14]: # build a new dictionary, useful for the future work of extracting artists from the track names
artist_fields = ['new_arid','new_artist_name'] inv_map = {v: k for k, v in new_dict.items()} if len(inv_map)!=len(new_dict): print('ERROR conversion dictionary')
# In[15]: ## write the dict to csv
import csv full = [] for i in range(0,len(inv_map)): row = [] row.append(i) row.append(inv_map[i]) full.append(row) with open(filepath+artist_improved_intermediate_dict, "w") as f: writer = csv.writer(f,delimiter = "\t",) writer.writerow(artist_fields) writer.writerows(full) print (artist_improved_intermediate_dict +" created") #TODO now or in preprocessing, remove stopwords
# # Execute the artists_improved notebook first, then execute this notebook
import pandas import numpy as np import re import collections import tqdm from utils.datareader import Datareader import ast
# In[3]: df = pandas.read_csv(filepath_or_buffer=filepath+track_csv,sep="\t",header=0, usecols=['tid','arid','alid','track_name'], dtype={'tid':np.int32,'arid':np.int32,'alid':np.int32,'track_name':str}) df = df [['tid','arid','alid','track_name']] df.head()
# In[4]: names = df['track_name'].str.lower().values tids = df['tid'].values alids = df['alid'].values arids = df['arid'].values #print('%d total tracks'%tids.shape[0])
# In[5]: # get the full matrix (dataset + testset)
dr = Datareader(mode='online', only_load=True, verbose=False ) urm = dr.get_urm() #print(urm.shape)
# In[6]: # only keep songs whose popularity reaches the threshold (threshold=0 keeps every track)
popularity = urm.sum(axis=0).A1 threshold = 0 ids_useful_tracks = np.argwhere(popularity>=threshold) #print('%d / %d useful tracks (threshold >= %d)'%(ids_useful_tracks.shape[0], popularity.shape[0], threshold))
# In[7]: # class track
class Track: def __init__(self, tid, alid, arid, name): self.tid = tid self.alid = alid self.arid = arid self.name = name self.main_ar = [] self.main_ar2 = [] self.co_ar = [] self.co_ar2 = []
# explore dataset functions
def explore(string, n=10000): c=0 for t in tracks[0:n]: if string in str(t.name): c+=1 print(t.name) print('%d instances'%(c)) def explore_main(string, n=10000): c=0 for t in tracks[0:n]: for a in t.main_ar: if string in str(a): c+=1 print(str(a)) print('%d instances'%(c)) def explore_co(string, n=10000): c=0 for t in tracks[0:n]: for a in t.co_ar: if string in str(a): c+=1 print(str(a)) print('%d instances'%(c))
# In[8]: # filter tracks above the threshold and build the objects
tracks = [] for index in ids_useful_tracks: index = int(index) # keep this cast, otherwise you get an array instead of a scalar new_track = Track(tids[index], alids[index], arids[index], names[index]) tracks.append(new_track) #print('%d objects Track created'%len(tracks))
# In[9]: # split into main and co-artists (1st level)
def reg(vect): exp = '' for s in vect: exp += s + '|' exp = exp[:-1] return exp def split_name(value, reg): values = re.split(reg,str(value)) l = len(values) main = [] co = [] if l == 1: main.append(values[0]) elif l == 2: main.append(values[0]) co.append(values[1]) else: main.append(values[0]) for i in range(1,l): co.append(values[i]) return main, co def remove_multiple_strings(cur_string, replace_list): for cur_word in replace_list: cur_string = cur_string.replace(cur_word, '') return cur_string
#replace list
r = [] r.append('remix') r.append('explicit album version') r.append('explicit version') r.append('explicit')
#regex
s = [] #s.append('\(featuring\.?(.*?)\)') #s.append('\(feat\.?(.*?)\)') #s.append('\((.*?)\)') s.append('[(|)]') s.append('[\[|\]]') s.append('[{|}]') s.append('\s-\s') #s.append('\sfeat\.?\s') reg_names = reg(s) c=0 main_a = [] co_a = [] n=len(tracks) for t in tracks[0:n]: t.main_ar = [] t.co_ar = [] main, co = split_name(t.name,reg_names) if (len(co)>0): #print(str(main)+' % '+str(co)) pass t.main_ar=main t.co_ar=co if len(t.main_ar)==0: print('ERROR splitting') if len(co) + len(main) > 1: c += 1 if(len(main_a) != len(co_a)): print("ERROR") else: pass #print("DONE, found %d instances on %d total"%(c,n))
# In[10]: # clean track names with feat/featuring not inside parentheses # split the main name with no parentheses
def split_artists(value, reg): values = re.split(reg,str(value)) return values def clean_names(names): names = list(map(str.strip, names)) names = list(filter(lambda s: s!='', names)) return names
#split track name and artist(s)
s=[] s.append('\sfeat[\.\:\.\,]?\s') s.append('\sft[\.\:\.\,]?\s') s.append('\sfeaturing[\.\:\.\,]?\s')
#split artists
w=[] w.append('\s&\s') w.append('\sand\s') w.append('\,') w.append('from') regex = reg(s) regex2 = reg(w) counter=0 counter2=0 for t in tracks[0:n]: main, co = split_name(t.main_ar[0], regex) t.new_title = main[0] new_co=[] if (len(co)>0): counter+=1 for c in co: new_co+=split_artists(c,regex2) if len(new_co)>1: pass #print(new_co) t.new_ar1 = clean_names(new_co) counter2+=len(t.new_ar1) #print('DONE, found %d instances (%d artists) on %d total'%(counter, counter2, n))
# In[11]: # now the messiest part: clean things inside parentheses
word_l=[] word_l+=['feat','featuring','ft.'] w=[] w.append('\s?ft\.?\s') w.append('\s?featuring\.?\s') w.append('\s?feat\.?\s') w.append('\s?feat\.?\s?') w.append('\s&\s') w.append('\s\\\s') w.append('\sand\s') w.append('\s?from\s') w.append('\s?with\s') w.append('\s?extended remix\s?') w.append('\s?extended version\s?') w.append('\s?lp version\s?') w.append('\s?album version\s?') w.append('\s?version\s?') w.append('\s?remix\s?') w.append('\s?explicit\s?') w.append('\s?radio mix\s?') w.append('\s?radio edit\s?') w.append('\s?a cappella\s?') w.append('\s?originally performed by\s?') w.append('\s?performed by\s?') w.append('\s?originally by\s?') w.append('\,') #w.append('from') regex = reg(w) c=0 for t in tracks[0:n]: t.new_ar2 = [] for a in t.co_ar: if any(xs in a for xs in word_l): new_ar = split_artists(a,regex) t.new_ar2 += clean_names(new_ar) c+=len(t.new_ar2) #print(t.new_ar2) #print('DONE, %d artists extracted'%c)
# In[12]: # merge the two lists
c=0 for t in tracks: t.new_ar = [] for a in t.new_ar1: if a not in t.new_ar: t.new_ar.append(a) c+=1 for a in t.new_ar2: if a not in t.new_ar: t.new_ar.append(a) c+=1 #print('DONE, %d total artists extracted'%c)
# In[13]: # start the conversion
# In[14]: # read data
df2 = pandas.read_csv(filepath_or_buffer=filepath+artist_improved_intermediate,sep="\t",header=0, usecols=['arid','artist_name','main_ids','co_ids'], dtype={'arid':np.int32,'artist_name':str, 'main_ids':'O','co_ids':'O'}) df2 = df2 [['arid','main_ids','co_ids']] df2.head() arid = df2['arid'].values mains = df2['main_ids'].values cos = df2['co_ids'].values
# In[15]: class Artist: def __init__(self, mains, cos): self.mains = mains self.cos = cos
# In[16]: # create the artist id dictionary: old_id -> new_ids
n=arid.shape[0] dic_old_new={} for i in range(n): m = np.array(ast.literal_eval(mains[i]), dtype=np.int32).tolist() c = np.array(ast.literal_eval(cos[i]), dtype=np.int32).tolist() dic_old_new[arid[i]]=Artist(m,c)
# In[17]: # read the new artists dict: new id -> name
df3 = pandas.read_csv(filepath_or_buffer=filepath+artist_improved_intermediate_dict,sep="\t",header=0, usecols=['new_arid','new_artist_name'], dtype={'new_arid':np.int32,'new_artist_name':str}) df3 = df3 [['new_arid','new_artist_name']] df3.head() new_arid = df3['new_arid'].values new_name = df3['new_artist_name'].values
# In[18]: # dict id->name and dict name->id
dict_id_name = {} dict_name_id = {} for i in range(new_arid.shape[0]): dict_id_name[new_arid[i]]=new_name[i] dict_name_id[new_name[i]]=new_arid[i] #print(len(dict_id_name)) #print(len(dict_name_id))
# In[19]: # now let's start the conversion # I consider
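# A minimal sketch (not part of the original notebook) of how the conversion
# announced above might proceed, using the tracks, dic_old_new, dict_name_id and
# dict_id_name objects built in the previous cells; the new_ar_ids attribute and
# the next_id counter are hypothetical names introduced here for illustration.
next_id = max(dict_id_name) + 1 if dict_id_name else 0
for t in tracks:
    t.new_ar_ids = []
    # map the track's old artist id to the new main-artist ids
    if t.arid in dic_old_new:
        t.new_ar_ids += dic_old_new[t.arid].mains
    # map every artist-name string extracted from the track title
    for name in t.new_ar:
        if name not in dict_name_id:
            # register a previously unseen name with a fresh id
            dict_name_id[name] = next_id
            dict_id_name[next_id] = name
            next_id += 1
        if dict_name_id[name] not in t.new_ar_ids:
            t.new_ar_ids.append(dict_name_id[name])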
# BSD 3-Clause License # # Copyright (c) 2020, IPASC # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np from ipasc_tool.core import MetaDatum, MetadataDeviceTags, MetadataAcquisitionTags
class PAData: """ The PAData class is the core class for accessing the information contained in the HDF5 files. Using the iohandler.file_reader.load_data method yields an instance of this class. It is structured into three main parts: (1) a numpy array containing the binary data (2) a dictionary with the acquisition metadata (3) a dictionary with the device metadata Furthermore, this class contains convenience methods to access all fields within the HDF5 dictionary, without the necessity to know the internal structure by heart. """
def __init__(self, binary_time_series_data: np.ndarray = None, meta_data_acquisition: dict = None, meta_data_device: dict = None): """ Creates an instance of the PAData class. :param binary_time_series_data: a numpy array that must not be None :param meta_data_acquisition: a dictionary. If None will be initialized as an empty dictionary. :param meta_data_device: a dictionary. If None will be initialized as an empty dictionary. """ if meta_data_acquisition is None: meta_data_acquisition = dict() if meta_data_device is None: meta_data_device = dict() self.binary_time_series_data = binary_time_series_data self.meta_data_acquisition = meta_data_acquisition self.meta_data_device = meta_data_device
def get_illuminator_ids(self) -> list: """ :return: a list of all ids of the illumination elements """ return list(self.meta_data_device[MetadataDeviceTags.ILLUMINATORS.tag].keys())
def get_detector_ids(self) -> list: """ :return: a list of all ids of the detection elements """ return list(self.meta_data_device[MetadataDeviceTags.DETECTORS.tag].keys())
def get_acquisition_meta_datum(self, meta_data_tag: MetaDatum) -> object: """ This method returns data from the acquisition meta data dictionary. :param meta_data_tag: the MetaDatum instance for which to get the information. :return: return value might be None, if the specified meta data tag was not found in the dictionary. """ if meta_data_tag.tag in self.meta_data_acquisition: return self.meta_data_acquisition[meta_data_tag.tag] else: return None
def get_custom_meta_datum(self, meta_data_tag: str) -> object: """ This method returns data from the acquisition meta data dictionary. :param meta_data_tag: a string instance for which to get the information. :return: return value might be None, if the specified meta data tag was not found in the dictionary. """ if meta_data_tag in self.meta_data_acquisition: return self.meta_data_acquisition[meta_data_tag] else: return None
def get_device_uuid(self): """ The UUID is a universally unique identifier to the device description that can be referenced. :return: return value can be None, if no UUID was found in the meta data. """ if MetadataDeviceTags.UUID.tag in self.meta_data_device[MetadataDeviceTags.GENERAL.tag]: return self.meta_data_device[MetadataDeviceTags.GENERAL.tag][MetadataDeviceTags.UUID.tag] else: return None
def get_field_of_view(self): """ The field of view defines an approximate cube of the area detectable by the PA imaging device in 3D cartesian coordinates [x1, x2, x3]. The field of view always starts in the origin of the coordinate system (which is defined as the centroid of the top-left transducer element when looking at the device normal to the imaging plane) and expands in the positive x1, x2, x3 directions. :return: return value can be None, if the key was not found in the meta data dictionary. """ if MetadataDeviceTags.FIELD_OF_VIEW.tag in self.meta_data_device[MetadataDeviceTags.GENERAL.tag]: return self.meta_data_device[MetadataDeviceTags.GENERAL.tag][MetadataDeviceTags.FIELD_OF_VIEW.tag] else: return None
def get_number_of_illuminators(self): """ The number of illuminators quantifies the number of illuminators that are used in the respective PA imaging device. Each of these illuminators is described by a set of illumination geometry parameters. :return: return value can be None, if the key was not found in the meta data dictionary. """ if MetadataDeviceTags.NUMBER_OF_ILLUMINATION_ELEMENTS.tag in self.meta_data_device[MetadataDeviceTags.GENERAL.tag]: return self.meta_data_device[MetadataDeviceTags.GENERAL.tag][MetadataDeviceTags.NUMBER_OF_ILLUMINATION_ELEMENTS.tag] else: return None
def get_number_of_detectors(self): """ The number of detectors quantifies the number of transducer elements that are used in the respective PA imaging device. Each of these transducer elements is described by a set of detection geometry parameters. :return: return value can be None, if the key was not found in the meta data dictionary. """ if MetadataDeviceTags.NUMBER_OF_DETECTION_ELEMENTS.tag in self.meta_data_device[MetadataDeviceTags.GENERAL.tag]: return self.meta_data_device[MetadataDeviceTags.GENERAL.tag][MetadataDeviceTags.NUMBER_OF_DETECTION_ELEMENTS.tag] else: return None
def get_illuminator_position(self, identifier=None): """ The illuminator position defines the position of the illuminator centroid in 3D cartesian coordinates [x1, x2, x3]. :return: return value can be None, if the key was not found in the meta data dictionary. """ return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.ILLUMINATOR_POSITION, identifier)
def get_illuminator_orientation(self, identifier=None): """ The illuminator orientation defines the rotation of the illuminator in 3D cartesian coordinates [r1, r2, r3]. It is the normal of the planar illuminator surface. :return: return value can be None, if the key was not found in the meta data dictionary. """ return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.ILLUMINATOR_ORIENTATION, identifier)
def get_illuminator_geometry(self, identifier=None): """ The illuminator shape defines the shape of the optical fibres, so it describes whether the illuminator is a point illuminator, or has a more continuous form. Illuminators can only have planar emitting surfaces. :return: return value can be None, if the key was not found in the meta data dictionary. """ return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.ILLUMINATOR_GEOMETRY, identifier)
def get_illuminator_geometry_type(self, identifier=None): """ The illuminator geometry type defines the shape of the optical fibre (bundle) output. It determines the interpretation of the data in the illuminator geometry field. The following geometry types are currently supported: "CIRCULAR" - defined by a single value that determines the radius of the circle "SPHERE" - defined by a single value that determines the radius of the sphere "CUBOID" - defined by three values that determine the extent of the cuboid in x, y, and z dimensions before the position and orientation transforms. "MESH" - defined by an STL-formatted string that determines the positions of points and faces before the position and orientation transforms. :return: return value can be None, if the key was not found in the meta data dictionary. """ return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.ILLUMINATOR_GEOMETRY_TYPE, identifier)
def get_wavelength_range(self, identifier=None): """ The wavelength range quantifies the wavelength range that the illuminator is capable of generating by reporting three values: the minimum wavelength, the maximum wavelength, and a metric for the accuracy: (min, max, accuracy). This parameter could for instance be (700, 900, 1.2), meaning that this illuminator can be tuned from 700 nm to 900 nm with an accuracy of 1.2 nm. :return: return value can be None, if the key was not found in the meta data dictionary. """ return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.WAVELENGTH_RANGE, identifier)
def get_energy_profile(self, identifier=None): """ The laser energy profile field is a discretized functional of wavelength (nm) that represents the laser energy of the illuminator with regard to the wavelength. Thereby, systematic differences in multispectral image acquisitions can be accounted for. :return: return value can be None, if the key was not found in the meta data dictionary. """ return self.get_illuminator_attribute_for_tag(MetadataDeviceTags.LASER_ENERGY_PROFILE, identifier)
def get_stability_profile(self, identifier=None): """ The laser noise profile field is a functional of wavelength (nm) that represents the standard deviation of the pulse-to-pulse laser energy of the illuminator with regard to
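# A minimal usage sketch, assuming the iohandler.file_reader.load_data entry point
# described in the PAData class docstring; the HDF5 file name is a placeholder.
from ipasc_tool.iohandler.file_reader import load_data

pa_data = load_data("example_scan.hdf5")
print(pa_data.binary_time_series_data.shape)
for illuminator_id in pa_data.get_illuminator_ids():
    # query per-illuminator geometry via the convenience accessors
    print(illuminator_id, pa_data.get_illuminator_position(illuminator_id))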
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
import copy import os import mock from neutron.api import extensions from neutron.api.rpc.callbacks.producer import registry from neutron import policy from neutron.services.trunk.rpc import server as trunk_server from neutron.tests.unit.api import test_extensions from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron_lib import context from neutron_lib.plugins import constants from neutron_lib.plugins import directory from oslo_utils import importutils from oslo_utils import uuidutils import six import webob.exc
from gbpservice.neutron.db import all_models # noqa
from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb from gbpservice.neutron.db import servicechain_db as svcchain_db from gbpservice.neutron.extensions import group_policy as gpolicy from gbpservice.neutron.extensions import servicechain as service_chain from gbpservice.neutron.services.grouppolicy.common import ( constants as gp_constants) import gbpservice.neutron.tests from gbpservice.neutron.tests.unit import common as cm from networking_sfc.extensions import flowclassifier from networking_sfc.extensions import sfc
JSON_FORMAT = 'json' _uuid = uuidutils.generate_uuid TESTDIR = os.path.dirname(os.path.abspath(gbpservice.neutron.tests.__file__)) ETCDIR = os.path.join(TESTDIR, 'etc') CHAIN_TENANT_ID = 'chain_owner' AGENT_TYPE = 'Open vSwitch agent' AGENT_CONF = {'alive': True, 'binary': 'somebinary', 'topic': 'sometopic', 'agent_type': AGENT_TYPE, 'configurations': {'bridge_mappings': {'physnet1': 'br-eth1'}}}
# There are some Neutron extensions which are designated as "required" for # supporting the extensions which are needed to run the GBP UTs. # For example, when using the router plugin, it supports the # "router_availability" extension which requires the # "availability_zone" extension, and which in turn requires the "agent" # extension. For us to be able to use that router plugin as is, we add # those required extensions to the list of "supported_extension_aliases" # for our test plugins. This keeps the extensions framework happy, and it # shouldn't cause a problem since we don't actually exercise those # extensions. The following list contains all such extensions. 
UNSUPPORTED_REQUIRED_EXTS = ['availability_zone', 'agent']
class ApiManagerMixin(object): agent_conf = AGENT_CONF
def _test_list_resources(self, resource, items, neutron_context=None, query_params=None): resource_plural = cm.get_resource_plural(resource) res = self._list(resource_plural, neutron_context=neutron_context, query_params=query_params) resource = resource.replace('-', '_') self.assertEqual(sorted([i['id'] for i in res[resource_plural]]), sorted([i[resource]['id'] for i in items]))
def _create_resource(self, type, expected_res_status=None, is_admin_context=False, deserialize=True, **kwargs): plural = cm.get_resource_plural(type) type = type.split('/')[-1] try: defaults = getattr(cm, 'get_create_%s_default_attrs' % type)() defaults.update(kwargs) except AttributeError: defaults = kwargs data = {type: {'tenant_id': self._tenant_id}} data[type].update(defaults) req = self.new_create_request(plural, data, self.fmt) req.environ['neutron.context'] = context.Context( '', kwargs.get('tenant_id', self._tenant_id) if not is_admin_context else self._tenant_id, is_admin_context) res = req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, res.status_int) elif deserialize and res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(self.fmt, res) if deserialize else res
def _update_resource(self, id, type, expected_res_status=None, is_admin_context=False, api=None, deserialize=True, **kwargs): plural = cm.get_resource_plural(type) type = type.split('/')[-1] data = {type: kwargs} tenant_id = kwargs.pop('tenant_id', self._tenant_id) req = self.new_update_request(plural, data, id, self.fmt) req.environ['neutron.context'] = context.Context( '', tenant_id if not is_admin_context else self._tenant_id, is_admin_context) res = req.get_response(api or self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, res.status_int) elif deserialize and res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(self.fmt, res) if deserialize else res
def _show_resource(self, id, plural, expected_res_status=None, is_admin_context=False, tenant_id=None, deserialize=True): req = self.new_show_request(plural, id, fmt=self.fmt) req.environ['neutron.context'] = context.Context( '', tenant_id or self._tenant_id, is_admin_context) res = req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, res.status_int) elif deserialize and res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(self.fmt, res) if deserialize else res
def _delete_resource(self, id, plural, is_admin_context=False, expected_res_status=None, tenant_id=None, deserialize=True): req = self.new_delete_request(plural, id) req.environ['neutron.context'] = context.Context( '', tenant_id or self._tenant_id, is_admin_context) res = req.get_response(self.ext_api) if expected_res_status: self.assertEqual(expected_res_status, res.status_int) elif deserialize and res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) if res.status_int != 204: return self.deserialize(self.fmt, res) if deserialize else res
def _get_object(self, type, id, api, expected_res_status=None): req = self.new_show_request(type, id, self.fmt) res = req.get_response(api) if expected_res_status: self.assertEqual(expected_res_status, res.status_int) elif res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(self.fmt, res)
def _list_resource(self, plural, api, is_admin_context=False, expected_res_status=None, tenant_id=None, **kwargs): param_str = '&'.join(['%s=%s' % (k, v) for k, v in six.iteritems(kwargs)]) req = self.new_list_request(plural, self.fmt, params=param_str or None) req.environ['neutron.context'] = context.Context( '', tenant_id or self._tenant_id, is_admin_context) res = req.get_response(api) if expected_res_status: self.assertEqual(res.status_int, expected_res_status) elif res.status_int >= webob.exc.HTTPClientError.code: raise webob.exc.HTTPClientError(code=res.status_int) return self.deserialize(self.fmt, res)
def _bind_port_to_host(self, port_id, host, data=None): plugin = directory.get_plugin() ctx = context.get_admin_context() agent = {'host': host} agent.update(self.agent_conf) plugin.create_or_update_agent(ctx, agent) data = data or {'port': {'binding:host_id': host, 'device_owner': 'a', 'device_id': 'b'}} # Update the port with the host binding req = self.new_update_request('ports', data, port_id, self.fmt) return self.deserialize(self.fmt, req.get_response(self.api))
def _bind_subport(self, ctx, trunk, port): with mock.patch.object(trunk_server.TrunkSkeleton, '__init__', return_value=None): trunk_skeleton = trunk_server.TrunkSkeleton() port['trunk_id'] = trunk['trunk']['id'] port['port_id'] = port['id'] trunk_skeleton.update_subport_bindings(ctx, [port])
def _unbind_port(self, port_id): data = {'port': {'binding:host_id': ''}} req = self.new_update_request('ports', data, port_id, self.fmt) return self.deserialize(self.fmt, req.get_response(self.api))
class GroupPolicyDBTestBase(ApiManagerMixin): resource_prefix_map = dict( (k, gp_constants.GBP_PREFIXES[constants.SERVICECHAIN]) for k in service_chain.RESOURCE_ATTRIBUTE_MAP.keys()) resource_prefix_map.update(dict( (k, gp_constants.GBP_PREFIXES[constants.GROUP_POLICY]) for k in gpolicy.RESOURCE_ATTRIBUTE_MAP.keys() )) fmt = JSON_FORMAT
def __getattr__(self, item): # Verify the attribute refers to an operation on a proper GBP object def _is_sc_resource(plural): return plural in service_chain.RESOURCE_ATTRIBUTE_MAP def _is_gbp_resource(plural): return plural in gpolicy.RESOURCE_ATTRIBUTE_MAP def _is_sfc_resource(plural): return plural in sfc.RESOURCE_ATTRIBUTE_MAP def _is_flowc_resource(plural): return plural in flowclassifier.RESOURCE_ATTRIBUTE_MAP def _is_valid_resource(plural): return (_is_gbp_resource(plural) or _is_sc_resource(plural) or _is_flowc_resource(plural) or _is_sfc_resource(plural)) def _get_prefix(plural): if _is_flowc_resource(plural) or _is_sfc_resource(plural): return 'sfc/' return ''
# Update Method if item.startswith('update_'): resource = item[len('update_'):] plural = cm.get_resource_plural(resource) if _is_valid_resource(plural): r = _get_prefix(plural) + resource def update_wrapper(id, **kwargs): return self._update_resource(id, r, **kwargs) return update_wrapper # Show Method if item.startswith('show_'): resource = item[len('show_'):] plural = cm.get_resource_plural(resource) if _is_valid_resource(plural): def show_wrapper(id, **kwargs): p = _get_prefix(plural) + plural return self._show_resource(id, p, **kwargs) return show_wrapper # Create Method if item.startswith('create_'): resource = item[len('create_'):] plural = cm.get_resource_plural(resource) if _is_valid_resource(plural): def create_wrapper(**kwargs): r = _get_prefix(plural) + resource 
return self._create_resource(r, **kwargs) return create_wrapper # Delete Method if item.startswith('delete_'): resource = item[len('delete_'):] plural = cm.get_resource_plural(resource) if _is_valid_resource(plural): def delete_wrapper(id, **kwargs): p = _get_prefix(plural) + plural return self._delete_resource(id, p, **kwargs) return delete_wrapper raise AttributeError def _get_resource_plural(self, resource): if resource.endswith('y'): resource_plural = resource.replace('y', 'ies') else: resource_plural = resource + 's' return resource_plural def _get_resource_singular(self, resource_plural): if resource_plural.endswith('ies'): return resource_plural.replace('ies', 'y') else: return resource_plural[:-1] def _test_list_resources(self, resource, items, neutron_context=None, query_params=None): resource_plural = self._get_resource_plural(resource) res = self._list(resource_plural, neutron_context=neutron_context, query_params=query_params) params = None if query_params: params = query_params.split('&') params = dict((x.split('=')[0], x.split('=')[1].split(',')) for x in params) count = getattr(self.plugin, 'get_%s_count' % resource_plural)( neutron_context or context.get_admin_context(), params) self.assertEqual(len(res[resource_plural]), count) resource = resource.replace('-', '_') self.assertEqual(sorted([i['id'] for i in res[resource_plural]]), sorted([i[resource]['id'] for i in items])) def _create_profiled_servicechain_node( self, service_type=constants.LOADBALANCERV2, shared_profile=False, profile_tenant_id=None, **kwargs): prof = self.create_service_profile( service_type=service_type, shared=shared_profile, tenant_id=profile_tenant_id or self._tenant_id)['service_profile'] return self.create_servicechain_node( service_profile_id=prof['id'], **kwargs) def _set_notification_mocks(self): self.l3_notify_p = mock.patch( 'neutron.extensions.l3agentscheduler.notify').start() self.l3_periodic_p = mock.patch( 'neutron.db.l3_agentschedulers_db.L3AgentSchedulerDbMixin.' 'add_periodic_l3_agent_status_check').start() self.dhcp_notify_p = mock.patch( 'neutron.extensions.dhcpagentscheduler.notify').start() self.dhcp_notifier_schedule = mock.patch( 'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.' 'DhcpAgentNotifyAPI._schedule_network').start() def _unset_notification_mocks(self): self.l3_notify_p.stop() self.l3_periodic_p.stop() self.dhcp_notify_p.stop() self.dhcp_notifier_schedule.stop() class GroupPolicyDBTestPlugin(gpdb.GroupPolicyDbPlugin): supported_extension_aliases = ['group-policy'] + UNSUPPORTED_REQUIRED_EXTS path_prefix = "/grouppolicy" DB_GP_PLUGIN_KLASS = (GroupPolicyDBTestPlugin.__module__ + '.' + GroupPolicyDBTestPlugin.__name__) class ServiceChainDBTestPlugin(svcchain_db.ServiceChainDbPlugin): supported_extension_aliases = ['servicechain'] + UNSUPPORTED_REQUIRED_EXTS path_prefix = "/servicechain" DB_SC_PLUGIN_KLASS = (ServiceChainDBTestPlugin.__module__ + '.' + ServiceChainDBTestPlugin.__name__) class GroupPolicyDbTestCase(GroupPolicyDBTestBase, test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self, core_plugin=None, sc_plugin=None, service_plugins=None, ext_mgr=None, gp_plugin=None): sc_plugin = sc_plugin or DB_SC_PLUGIN_KLASS gp_plugin = gp_plugin or DB_GP_PLUGIN_KLASS if not service_plugins: service_plugins = { 'l3_plugin_name': 'router', 'flavors_plugin_name': 'neutron.services.flavors.' 
'flavors_plugin.FlavorsPlugin', 'gp_plugin_name': gp_plugin, 'sc_plugin_name': sc_plugin} # Always install SFC plugin for convenience service_plugins['sfc_plugin_name'] = 'sfc' service_plugins['flowc_plugin_name'] = 'flow_classifier' extensions.append_api_extensions_path( gbpservice.neutron.extensions.__path__) service_plugins['flavors_plugin_name'] =\ 'neutron.services.flavors.flavors_plugin.FlavorsPlugin' super(GroupPolicyDbTestCase, self).setUp( plugin=core_plugin, ext_mgr=ext_mgr, service_plugins=service_plugins ) test_policy_file = ETCDIR + "/test-policy.json" policy.refresh(policy_file=test_policy_file) self.plugin = importutils.import_object(gp_plugin) self._sc_plugin = importutils.import_object(sc_plugin) if not ext_mgr: ext_mgr = extensions.PluginAwareExtensionManager.get_instance() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) plugins = directory.get_plugins() self._gbp_plugin = plugins.get(constants.GROUP_POLICY) self._sc_plugin = plugins.get(constants.SERVICECHAIN) self._l3_plugin = plugins.get(constants.L3) self._set_notification_mocks() # The following is done to stop the neutron code from checking # for dhcp agents if '_aliases' in plugins.get('CORE').__dict__: if 'agent' in plugins.get('CORE').__dict__['_aliases']: plugins.get('CORE').__dict__['_aliases'].remove('agent') if 'dhcp_agent_scheduler' in plugins.get('CORE').__dict__[ '_aliases']: plugins.get('CORE').__dict__['_aliases'].remove( 'dhcp_agent_scheduler') def tearDown(self): self._unset_notification_mocks() registry.clear() super(GroupPolicyDbTestCase, self).tearDown() class TestGroupResources(GroupPolicyDbTestCase): def _test_show_resource(self, resource, resource_id, attrs): resource_plural = cm.get_resource_plural(resource) req = self.new_show_request(resource_plural, resource_id, fmt=self.fmt) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) for k, v in six.iteritems(attrs): self.assertEqual(res[resource][k], v) def test_create_and_show_policy_target(self): ptg_id = self.create_policy_target_group()['policy_target_group']['id'] attrs = cm.get_create_policy_target_default_attrs( policy_target_group_id=ptg_id) pt = self.create_policy_target(policy_target_group_id=ptg_id) for k, v in six.iteritems(attrs): self.assertEqual(pt['policy_target'][k], v) req = self.new_show_request( 'policy_target_groups', ptg_id, fmt=self.fmt) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertEqual(res['policy_target_group']['policy_targets'], [pt['policy_target']['id']]) self._test_show_resource( 'policy_target', pt['policy_target']['id'], attrs) def test_list_policy_targets(self): pts = [self.create_policy_target(name='pt1', description='pt'), self.create_policy_target(name='pt2', description='pt'), self.create_policy_target(name='pt3', description='pt')] self._test_list_resources('policy_target', pts, query_params='description=pt') def test_update_policy_target(self): name = 'new_policy_target' description = 'new desc' attrs = cm.get_create_policy_target_default_attrs( name=name, description=description) pt = self.create_policy_target() data =
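# A hedged illustration (not from the original module) of the helpers that
# GroupPolicyDBTestBase.__getattr__ synthesizes at attribute-lookup time:
# names like create_policy_target or show_policy_target resolve to wrappers
# around _create_resource/_show_resource, so a test can be written as:
class ExamplePolicyTargetTest(GroupPolicyDbTestCase):
    def test_create_show_delete(self):
        ptg = self.create_policy_target_group()['policy_target_group']
        pt = self.create_policy_target(
            policy_target_group_id=ptg['id'])['policy_target']
        shown = self.show_policy_target(pt['id'])
        self.assertEqual(pt['id'], shown['policy_target']['id'])
        self.delete_policy_target(pt['id'], expected_res_status=204)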
import sys import sc2 from sc2.ids.ability_id import AbilityId from sc2.ids.unit_typeid import UnitTypeId from sc2.ids.upgrade_id import UpgradeId from sc2.ids.buff_id import BuffId from sc2.unit import Unit from sc2.units import Units from sc2.position import Point2, Point3 from loguru import logger
logger.remove() logger.add(sys.stderr, level="INFO")
# All townhalls to check if the enemy's main base is destroyed
TOWNHALLS = { UnitTypeId.COMMANDCENTER, UnitTypeId.COMMANDCENTERFLYING, UnitTypeId.ORBITALCOMMAND, UnitTypeId.ORBITALCOMMANDFLYING, UnitTypeId.PLANETARYFORTRESS, UnitTypeId.HATCHERY, UnitTypeId.LAIR, UnitTypeId.HIVE, UnitTypeId.NEXUS }
# The bot class
class TooManyStalkersBot(sc2.BotAI): def __init__(self): """Initialize variables """ super().__init__() # The maximum amount of workers self.MAX_WORKERS = 80 # The maximum amount of Nexuses self.MAX_NEXUSES = 3 # The upgrades that will be researched (weapons, armor, shields) self.UPGRADES = ["PROTOSSGROUNDWEAPONSLEVEL", "PROTOSSGROUNDARMORSLEVEL", "PROTOSSSHIELDSLEVEL"] # Whether the enemy was greeted, and our main base self.greeted = False self.main: Unit = None # The Proxy and the Proxy position self.proxy: Unit = None self.proxy_position: Point3 = Point3() # How many attempts may be made to rebuild the Proxy Pylon self.MAX_PROXY_ATTEMPTS = 3 # How many attempts have been made thus far self.proxy_attempts = 0 # The defending Stalkers and the wall-off unit self.bases_defenders: dict = {} self.wall_unit: Unit = None # The attacking Stalkers self.attackers: Units = Units([], self) # The amount of timing attacks self.timing_attack: int = 0 # The amount of attacks self.attack_amount = 0 # If the enemy's main base is destroyed self.enemy_main_destroyed_triggerd = False # How many attacking Stalkers there must be for every defending Stalker self.attack_defend_ratio = 6/1 # If we should debug and if we should debug once (testing only) self.DEBUG = False self.debug_once = True
async def on_before_start(self): """Before the game starts """ # Calculate the Proxy location self.proxy_position = self.get_proxy_location() logger.info(f"Proxy position: {self.proxy_position}") # Get the main base self.main = self.townhalls.first
async def on_step(self, iteration: int): """What to do each step Args: iteration (int): what number step it currently is """ # Greet the opponent if iteration > 5 and not self.greeted: logger.info("Greeted the enemy") self.greeted = True await self.chat_send(f"Hello {self.opponent_id}, GL HF") # Debug and draw on screen await self.debug() await self.debug_draw() # (Built-in) Distribute workers await self.distribute_workers() # Manage the bases await self.manage_bases() # Build Pylons await self.build_pylons() # Build a Proxy Pylon await self.build_proxy() # Collect Vespene Gas await self.collect_gas() # Build research buildings and research await self.build_research_structures() await self.research() # Build Gateways/Warpgates, train/warp units, attack await self.build_unit_structures() await self.train_units() await self.expand() await self.attack()
async def debug(self): """Spawn 5 Stalkers if self.DEBUG is true """ if self.DEBUG and self.debug_once: logger.info("Created 5 Stalkers") await self._client.debug_create_unit( [[UnitTypeId.STALKER, 5, self.main.position, 1]]) self.debug_once = False
async def debug_draw(self): """Draw text and spheres for debugging purposes """ # Draw a sphere at the Proxy position self._client.debug_sphere_out( 
self.proxy_position, 2, color=(255, 0, 0)) # If there are attackers, put text on their position if self.attackers: for stalker in self.units.tags_in(self.attackers): self._client.debug_text_world("Attacker", stalker, color=(255, 255, 0)) for base_defenders in self.bases_defenders.values(): # If there are defenders, put text on their position if base_defenders: for stalker in self.units.tags_in(base_defenders): self._client.debug_text_world("Defender", stalker, color=(255, 0, 255)) # If there is a wall-unit, put text on its position if self.wall_unit: wall_unit = self.units.tags_in([self.wall_unit.tag]) if len(wall_unit) > 0: self.wall_unit = wall_unit[0] self._client.debug_text_world("Wall-off", self.wall_unit, color=(255, 255, 255)) if self.structures(UnitTypeId.NEXUS).exists: nexuses = self.structures(UnitTypeId.NEXUS) for nexus in nexuses: self._client.debug_sphere_out( nexus.position3d, 4, color=(40, 240, 250))
async def manage_bases(self): """Handle the Chronoboost for each Nexus and produce workers """ # Loop over all the Nexuses for nexus in self.townhalls: # Handle Chronoboost await self.chronoboost(nexus) # Train Probes if ( nexus.is_idle and self.workers.amount < self.MAX_WORKERS and self.can_afford(UnitTypeId.PROBE) ): nexus.train(UnitTypeId.PROBE)
async def build_pylons(self): """Build Pylons if the supply is too low """ # If the Pylon for the wall-off wasn't built, build it wall_pylon = self.main_base_ramp.protoss_wall_pylon if ( await self.can_place_single(UnitTypeId.PYLON, wall_pylon) and self.can_afford(UnitTypeId.PYLON) ): await self.build(UnitTypeId.PYLON, wall_pylon) # If there are 8 supply or fewer left, build a Pylon if ( self.supply_left <= 8 and self.supply_cap < 200 and self.townhalls.ready.exists and not self.already_pending(UnitTypeId.PYLON) and self.can_afford(UnitTypeId.PYLON) ): position = self.townhalls.ready.random.position.towards( self.game_info.map_center, 15 ) await self.build(UnitTypeId.PYLON, near=position)
async def build_proxy(self): """Builds a Proxy Pylon once the Warpgate research is a quarter done """ # Build a Proxy Pylon once the Warpgate research is 25% done if ( self.already_pending_upgrade(UpgradeId.WARPGATERESEARCH) > 0.25 and self.proxy is None and self.proxy_attempts < self.MAX_PROXY_ATTEMPTS and self.can_afford(UnitTypeId.PYLON) and not self.already_pending(UnitTypeId.PYLON) ): # If this is the first attempt at a Proxy, use the calculated position if self.proxy_attempts == 0: pos = self.proxy_position # If this isn't the first attempt, calculate a new location elif self.proxy_attempts > 0: pos = self.get_proxy_location() logger.info("Proxy position changed to: " f"{self.proxy_position}") # Build a Proxy Pylon logger.info(f"Building a proxy at {self.proxy_position}") await self.build(UnitTypeId.PYLON, pos) # Increment the Proxy attempts self.proxy_attempts += 1
async def collect_gas(self): """Collect Vespene Gas after a Gateway was built """ # Only collect Gas when a Gateway was built if self.structures(UnitTypeId.GATEWAY).exists: # Loop over all the Nexuses for nexus in self.townhalls.ready: # Get all the Vespene Geysers vespenes: Units = self.vespene_geyser.closer_than(10, nexus) # Loop over all the Vespene Geysers for vespene in vespenes: # Build an Assimilator on top of the Vespene Geysers if ( await self.can_place_single( UnitTypeId.ASSIMILATOR, vespene.position) and not self.already_pending(UnitTypeId.ASSIMILATOR) and self.can_afford(UnitTypeId.ASSIMILATOR) ): await self.build(UnitTypeId.ASSIMILATOR, vespene)
async def 
build_research_structures(self): """Build structures to research from """ # None of the structures can be built if we don't have Pylons if self.structures(UnitTypeId.PYLON).ready.exists: # Build research buildings near a Pylon that isn't the Proxy or the Cannon pylon: Unit = self.structures(UnitTypeId.PYLON).ready.random while pylon == self.proxy: pylon = self.structures(UnitTypeId.PYLON).ready.random # If we have a Gateway, build a Cybernetics Core if ( self.structures(UnitTypeId.GATEWAY).ready.exists and self.structures(UnitTypeId.CYBERNETICSCORE).amount == 0 and self.can_afford(UnitTypeId.CYBERNETICSCORE) ): await self.build(UnitTypeId.CYBERNETICSCORE, near=pylon) # If we have a Cybernetics Core, build a Forge if ( self.structures(UnitTypeId.CYBERNETICSCORE).exists and self.already_pending(UnitTypeId.FORGE) == 0 and not self.structures(UnitTypeId.FORGE).exists and self.can_afford(UnitTypeId.FORGE) ): await self.build(UnitTypeId.FORGE, near=pylon) # If the Forge has started its first upgrade, build a Twilight Council if ( self.already_pending_upgrade( UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) > 0 and self.structures(UnitTypeId.TWILIGHTCOUNCIL).amount == 0 and self.can_afford(UnitTypeId.TWILIGHTCOUNCIL) ): await self.build(UnitTypeId.TWILIGHTCOUNCIL, near=pylon)
async def research(self): """Research Warpgates, Weapons, Armor and Shields """ # If we have a Cybernetics Core, research Warpgates if self.structures(UnitTypeId.CYBERNETICSCORE).ready.exists: # Select a Cybernetics Core and research Warpgate ccore = self.structures(UnitTypeId.CYBERNETICSCORE).ready.first ccore.research(UpgradeId.WARPGATERESEARCH) # If we have a Forge and Warpgates are researching, research upgrades if ( self.structures(UnitTypeId.FORGE).ready.exists and self.already_pending_upgrade(UpgradeId.WARPGATERESEARCH) > 0 ): # Select a Forge and upgrade forge = self.structures(UnitTypeId.FORGE).ready.first # Research Ground Weapons if ( self.can_afford( UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) and self.already_pending_upgrade( UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) == 0 ): forge.research(UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) # Research Ground Armor elif ( self.can_afford( UpgradeId.PROTOSSGROUNDARMORSLEVEL1) and self.already_pending_upgrade( UpgradeId.PROTOSSGROUNDARMORSLEVEL1) == 0 and self.already_pending_upgrade( UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) == 1 ): forge.research(UpgradeId.PROTOSSGROUNDARMORSLEVEL1) # Research Shields elif ( self.can_afford( UpgradeId.PROTOSSSHIELDSLEVEL1) and self.already_pending_upgrade( UpgradeId.PROTOSSSHIELDSLEVEL1) == 0 and self.already_pending_upgrade( UpgradeId.PROTOSSGROUNDARMORSLEVEL1) == 1 and self.already_pending_upgrade( UpgradeId.PROTOSSGROUNDWEAPONSLEVEL1) == 1 ): forge.research(UpgradeId.PROTOSSSHIELDSLEVEL1)
async def build_unit_structures(self): """Build Gateways """ # Build Gateways only when there is a Pylon if self.structures(UnitTypeId.PYLON).ready.exists: # Get the placement positions for a wall wall_buildings = self.main_base_ramp.protoss_wall_buildings # See if a building can be placed on the position, and build it for wall_building in wall_buildings: if ( await self.can_place_single(UnitTypeId.GATEWAY, wall_building) and self.can_afford(UnitTypeId.GATEWAY) ): await self.build(UnitTypeId.GATEWAY, wall_building) # Build Gateways near Pylons that aren't the Proxy or the Cannon pylon: Unit = self.structures(UnitTypeId.PYLON).ready.random while pylon == self.proxy: pylon = self.structures(UnitTypeId.PYLON).ready.random # Build Gateways once the Warpgate research is done if ( 
self.already_pending_upgrade(UpgradeId.WARPGATERESEARCH) == 1 and self.townhalls.amount > 1 and self.structures( UnitTypeId.WARPGATE).amount < self.max_gateways and self.can_afford(UnitTypeId.GATEWAY) ): await self.build(UnitTypeId.GATEWAY, near=pylon) async def train_units(self): """Train Stalkers from Gateways and warp them in with Warpgates """ # Build a Wall-off unit if there is not one if ( self.wall_unit is None and self.can_afford(UnitTypeId.ZEALOT) and not self.already_pending(UnitTypeId.ZEALOT) ): # If we have Warpgates if ( self.already_pending_upgrade(UpgradeId.WARPGATERESEARCH) == 1 and self.structures(UnitTypeId.WARPGATE).ready.exists ): # Select a random Warpgate warpgate = self.structures(UnitTypeId.WARPGATE).ready.random # Get the available abilities of the Warpgate abilities = await self.get_available_abilities(warpgate) # Warp a Zealot if we can warp if AbilityId.WARPGATETRAIN_ZEALOT in abilities: # Select a random Pylon that isn't the Proxy or cannon pylon = self.structures(UnitTypeId.PYLON).ready.random
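# manage_bases() above awaits self.chronoboost(nexus), which is not shown in this
# excerpt; a minimal sketch of such a handler, assuming the standard python-sc2
# energy/buff APIs (the choice of what to boost is a guess, not the author's code):
async def chronoboost(self, nexus: Unit):
    """Spend Chronoboost energy on a busy structure that isn't already boosted."""
    # A Nexus needs 50 energy to cast Chronoboost
    if nexus.energy < 50:
        return
    for structure in self.structures.ready:
        if not structure.is_idle and not structure.has_buff(
                BuffId.CHRONOBOOSTENERGYCOST):
            nexus(AbilityId.EFFECT_CHRONOBOOSTENERGYCOST, structure)
            return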
# -*- coding: utf8 -*- # Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
import json from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException from tencentcloud.common.abstract_client import AbstractClient from tencentcloud.vod.v20180717 import models
class VodClient(AbstractClient): _apiVersion = '2018-07-17' _endpoint = 'vod.tencentcloudapi.com'
def ApplyUpload(self, request): """* This API is used to apply for uploading a media file (and its cover file) and to obtain the metadata for uploading the file to Tencent Cloud VOD (including the upload path and upload signature), which is used by the subsequent upload APIs. * For the upload workflow, see the [overview of server-side upload](https://cloud.tencent.com/document/product/266/9759#.E4.B8.8A.E4.BC.A0.E6.B5.81.E7.A8.8B). :param request: the request structure for calling ApplyUpload. :type request: :class:`tencentcloud.vod.v20180717.models.ApplyUploadRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.ApplyUploadResponse` """ try: params = request._serialize() body = self.call("ApplyUpload", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ApplyUploadResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def CommitUpload(self, request): """This API is used to confirm the result of uploading a media file (and its cover file) to Tencent Cloud VOD, store the media information, and return the playback address and file ID. :param request: the request structure for calling CommitUpload. :type request: :class:`tencentcloud.vod.v20180717.models.CommitUploadRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.CommitUploadResponse` """ try: params = request._serialize() body = self.call("CommitUpload", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CommitUploadResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def ConfirmEvents(self, request): """* After a developer pulls event notifications and receives an event, this API must be called to confirm that the message has been received; * after obtaining an event handle, the developer has 30 seconds to confirm it; beyond 30 seconds a parameter error (4000) is reported; * for more information, see [server-side event notification](https://cloud.tencent.com/document/product/266/7829). :param request: the request structure for calling ConfirmEvents. :type request: :class:`tencentcloud.vod.v20180717.models.ConfirmEventsRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.ConfirmEventsResponse` """ try: params = request._serialize() body = self.call("ConfirmEvents", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.ConfirmEventsResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def CreateClass(self, request): """* Used to manage media by category; * this API does not affect the categories of existing media; to modify a media file's category, call the [modify media file attributes](/document/product/266/31762) API. * The category hierarchy may not exceed 4 levels. * Each category may have at most 500 subcategories. :param request: the request structure for calling CreateClass. :type request: :class:`tencentcloud.vod.v20180717.models.CreateClassRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.CreateClassResponse` """ try: params = request._serialize() body = self.call("CreateClass", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateClassResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def CreateProcedureTemplate(self, request): """Creates a custom task-flow template; up to 50 templates are allowed. :param request: the request structure for calling CreateProcedureTemplate. :type request: :class:`tencentcloud.vod.v20180717.models.CreateProcedureTemplateRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.CreateProcedureTemplateResponse` """ try: params = request._serialize() body = self.call("CreateProcedureTemplate", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateProcedureTemplateResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def CreateTranscodeTemplate(self, request): """Creates a custom transcoding template; up to 1,000 templates are allowed. :param request: the request structure for calling CreateTranscodeTemplate. :type request: :class:`tencentcloud.vod.v20180717.models.CreateTranscodeTemplateRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.CreateTranscodeTemplateResponse` """ try: params = request._serialize() body = self.call("CreateTranscodeTemplate", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateTranscodeTemplateResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def CreateWatermarkTemplate(self, request): """Creates a custom watermark template; up to 1,000 templates are allowed. :param request: the request structure for calling CreateWatermarkTemplate. :type request: :class:`tencentcloud.vod.v20180717.models.CreateWatermarkTemplateRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.CreateWatermarkTemplateResponse` """ try: params = request._serialize() body = self.call("CreateWatermarkTemplate", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.CreateWatermarkTemplateResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DeleteClass(self, request): """* A category can be deleted only if it has no subcategories and no associated media; * otherwise, first [delete the media](/document/product/266/31764) and the subcategories, then delete the category; :param request: the request structure for calling DeleteClass. :type request: :class:`tencentcloud.vod.v20180717.models.DeleteClassRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DeleteClassResponse` """ try: params = request._serialize() body = self.call("DeleteClass", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteClassResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DeleteMedia(self, request): """* Deletes a media file and its corresponding video-processing files (such as transcoded videos, image sprites, screenshots, and videos published to WeChat); * the transcoded files or WeChat-published files of a video with a given ID can also be deleted individually; :param request: the request structure for calling DeleteMedia. :type request: :class:`tencentcloud.vod.v20180717.models.DeleteMediaRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DeleteMediaResponse` """ try: params = request._serialize() body = self.call("DeleteMedia", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteMediaResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DeleteProcedureTemplate(self, request): """Deletes the task-flow template with the specified name. :param request: the request structure for calling DeleteProcedureTemplate. :type request: :class:`tencentcloud.vod.v20180717.models.DeleteProcedureTemplateRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DeleteProcedureTemplateResponse` """ try: params = request._serialize() body = self.call("DeleteProcedureTemplate", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteProcedureTemplateResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DeleteTranscodeTemplate(self, request): """Deletes a custom transcoding template. :param request: the request structure for calling DeleteTranscodeTemplate. :type request: :class:`tencentcloud.vod.v20180717.models.DeleteTranscodeTemplateRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DeleteTranscodeTemplateResponse` """ try: params = request._serialize() body = self.call("DeleteTranscodeTemplate", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteTranscodeTemplateResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DeleteWatermarkTemplate(self, request): """Deletes a custom watermark template. :param request: the request structure for calling DeleteWatermarkTemplate. :type request: :class:`tencentcloud.vod.v20180717.models.DeleteWatermarkTemplateRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DeleteWatermarkTemplateResponse` """ try: params = request._serialize() body = self.call("DeleteWatermarkTemplate", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DeleteWatermarkTemplateResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DescribeAllClass(self, request): """* Gets all of the user's category information. :param request: the request structure for calling DescribeAllClass. :type request: :class:`tencentcloud.vod.v20180717.models.DescribeAllClassRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DescribeAllClassResponse` """ try: params = request._serialize() body = self.call("DescribeAllClass", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeAllClassResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DescribeMediaInfos(self, request): """1. This API can get multiple kinds of information for multiple videos, including: 1. basic information (basicInfo): video name, size, duration, cover image, etc. 2. metadata (metaData): video stream information, audio stream information, etc. 3. transcoding results (transcodeInfo): the addresses, specifications, bitrates, and resolutions of the videos generated by transcoding. 4. animated-image results (animatedGraphicsInfo): information about the animated images (e.g. gif) generated from the video. 5. sampled screenshot information (sampleSnapshotInfo): information about the sampled screenshots of the video. 6. image sprite information (imageSpriteInfo): information about the image sprites generated from the video. 7. time-point screenshot information (snapshotByTimeOffsetInfo): information about the screenshots taken at the specified time points. 8. key-frame tag information (keyFrameDescInfo): information about the tags set on the video. 2. The response can be restricted to return only part of this information. :param request: the request structure for calling DescribeMediaInfos. :type request: :class:`tencentcloud.vod.v20180717.models.DescribeMediaInfosRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DescribeMediaInfosResponse` """ try: params = request._serialize() body = self.call("DescribeMediaInfos", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeMediaInfosResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DescribeProcedureTemplates(self, request): """Gets the list of task-flow template details by task-flow template name. :param request: the request structure for calling DescribeProcedureTemplates. :type request: :class:`tencentcloud.vod.v20180717.models.DescribeProcedureTemplatesRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DescribeProcedureTemplatesResponse` """ try: params = request._serialize() body = self.call("DescribeProcedureTemplates", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeProcedureTemplatesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DescribeTaskDetail(self, request): """Queries the execution status and result details of a task by task ID (only tasks submitted within the last 3 days can be queried). :param request: the request structure for calling DescribeTaskDetail. :type request: :class:`tencentcloud.vod.v20180717.models.DescribeTaskDetailRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DescribeTaskDetailResponse` """ try: params = request._serialize() body = self.call("DescribeTaskDetail", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeTaskDetailResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DescribeTasks(self, request): """* This API is used to query the task list; * when the list is long, a single call cannot pull the entire list; it can be pulled in batches via the ScrollToken parameter; * only tasks within the last three days (72 hours) can be queried. :param request: the request structure for calling DescribeTasks. :type request: :class:`tencentcloud.vod.v20180717.models.DescribeTasksRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DescribeTasksResponse` """ try: params = request._serialize() body = self.call("DescribeTasks", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeTasksResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DescribeTranscodeTemplates(self, request): """Gets the list of transcoding template details by transcoding template unique identifier. The result contains all matching custom templates as well as the [preset transcoding templates](https://cloud.tencent.com/document/product/266/11701#.E9.A2.84.E7.BD.AE.E8.BD.AC.E7.A0.81.E6.A8.A1.E6.9D.BF). :param request: the request structure for calling DescribeTranscodeTemplates. :type request: :class:`tencentcloud.vod.v20180717.models.DescribeTranscodeTemplatesRequest` :rtype: :class:`tencentcloud.vod.v20180717.models.DescribeTranscodeTemplatesResponse` """ try: params = request._serialize() body = self.call("DescribeTranscodeTemplates", params) response = json.loads(body) if "Error" not in response["Response"]: model = models.DescribeTranscodeTemplatesResponse() model._deserialize(response["Response"]) return model else: code = response["Response"]["Error"]["Code"] message = response["Response"]["Error"]["Message"] reqid = response["Response"]["RequestId"] raise TencentCloudSDKException(code, message, reqid) except Exception as e: if isinstance(e, TencentCloudSDKException): raise else: raise TencentCloudSDKException(e.message, e.message)
def DescribeWatermarkTemplates(self, request): """Queries custom watermark templates; supports paginated queries by criteria. :param request: the request structure for calling DescribeWatermarkTemplates.
import abc import os import yaml import json import pprint import shutil import tarfile import tempfile import requests from pathlib import Path from typing import Dict, Sequence, Union, TYPE_CHECKING from dotenv import load_dotenv from agentos.identifiers import ( ComponentIdentifier, RunIdentifier, RepoIdentifier, RunCommandIdentifier, ) from agentos.specs import ( flatten_spec, RepoSpec, ComponentSpec, NestedComponentSpec, RunSpec, RunCommandSpec, ) if TYPE_CHECKING: from agentos.component import Component from agentos.run import Run # add USE_LOCAL_SERVER=True to .env to talk to local server load_dotenv() AOS_WEB_BASE_URL = "https://aos-web.herokuapp.com" if os.getenv("USE_LOCAL_SERVER", False) == "True": AOS_WEB_BASE_URL = "http://localhost:8000" AOS_WEB_API_EXTENSION = "/api/v1" AOS_WEB_API_ROOT = f"{AOS_WEB_BASE_URL}{AOS_WEB_API_EXTENSION}" class Registry(abc.ABC): def __init__(self, base_dir: str = None): self.base_dir = ( base_dir if base_dir else "." ) # Used for file-backed Registry types. @staticmethod def from_dict(input_dict: Dict) -> "Registry": return InMemoryRegistry(input_dict) @staticmethod def from_yaml(yaml_file: str) -> "Registry": with open(yaml_file) as file_in: config = yaml.safe_load(file_in) return InMemoryRegistry(config, base_dir=str(Path(yaml_file).parent)) @classmethod def from_default(cls): if not hasattr(cls, "_default_registry"): cls._default_registry = WebRegistry(AOS_WEB_API_ROOT) return cls._default_registry @abc.abstractmethod def to_dict(self) -> Dict: raise NotImplementedError def to_yaml(self, filename: str) -> None: with open(filename, "w") as file: yaml.dump(self.to_dict(), file) @abc.abstractmethod def get_component_specs( self, filter_by_name: str = None, filter_by_version: str = None ) -> NestedComponentSpec: """ Return a dictionary of the component specs in this Registry, optionally filtered by name and/or version; an empty dictionary is returned if none match. Each Component Spec is itself a dict mapping ComponentIdentifier to a dict of properties that define the Component. Optionally, filter the list to match all filter strings provided. Filters can be provided on name, version, or both. If this registry contains zero component specs that match the filter criteria (if any), then an empty dictionary is returned. If ``filter_by_name`` and ``filter_by_version`` are provided, then 0 or 1 components will be returned. :param filter_by_name: return only components with this name. :param filter_by_version: return only components with this version. :returns: A dictionary of components in this registry, optionally filtered by name, version, or both. If no matching components are found, an empty dictionary is returned. """ raise NotImplementedError def get_component_spec( self, name: str, version: str = None, flatten: bool = False, error_if_not_found: bool = True, ) -> ComponentSpec: """ Returns the component spec with ``name`` and ``version`` if it exists, or raises an error if it does not. A component's name and version are defined as its identifier's name and version. Registries are not allowed to contain multiple Components with the same identifier.
The Registry abstract base class does not enforce that all Components have a version (version can be None), though some sub-classes, such as web service backed registries, may choose to enforce that constraint. When version is unspecified or None, this function assumes that a Component ``c`` exists where ``c.name == name`` and ``c.version is None``, and throws an error otherwise. Subclasses of Registry may choose to provide their own (more elaborate) semantics for "default components". E.g., since WebRegistry does not allow non-versioned components, it defines its own concept of a default component by maintaining a separate map from component name to a specific version of the component, and it allows that mapping to be updated by users. :param name: The name of the component to fetch. :param version: Optional version of the component to fetch. :param flatten: If True, flatten the outermost 2 layers of nested dicts into a single dict. In an unflattened component spec, the outermost dict maps the identifier (a string in the format name[==version]) to the Component's properties (class_name, repo, etc.). In a flattened Component spec, the name and version are included in the same dictionary as the class_name, repo, dependencies, etc. :param error_if_not_found: Set to False to return an empty dict in the case that a matching component is not found in this registry. :returns: a ComponentSpec (i.e. a dict) matching the filter criteria provided; otherwise an error is raised. """ components = self.get_component_specs(name, version) if len(components) == 0: if error_if_not_found: raise LookupError( f"This registry does not contain any components that " f"match your filter criteria: name:'{name}', " f"version:'{version}'." ) else: return {} if len(components) > 1: versions = [ ComponentIdentifier.from_str(c_id).version for c_id in components.keys() ] version_str = "\n - ".join(versions) raise LookupError( f"This registry contains more than one component with " f"the name {name}. Please specify one of the following " f"versions:\n - {version_str}" ) return flatten_spec(components) if flatten else components def get_component_spec_by_id( self, identifier: Union[ComponentIdentifier, str], flatten: bool = False, ) -> ComponentSpec: identifier = ComponentIdentifier.from_str(str(identifier)) return self.get_component_spec( identifier.name, identifier.version, flatten=flatten ) @abc.abstractmethod def get_repo_spec( self, repo_id: RepoIdentifier, flatten: bool = False ) -> RepoSpec: raise NotImplementedError @abc.abstractmethod def get_run_spec( self, run_id: RunIdentifier, flatten: bool = False ) -> RunSpec: raise NotImplementedError def get_run_command_spec( self, run_command_id: RunCommandIdentifier, flatten: bool = False ) -> RunCommandSpec: raise NotImplementedError @abc.abstractmethod def get_registries(self) -> Sequence: raise NotImplementedError @abc.abstractmethod def add_component_spec(self, component_spec: NestedComponentSpec) -> None: """ Adds a component spec to this registry. *This does not add any Registry Objects* to this registry. Those must be handled explicitly. Typically, to register a component, it's easier to use the higher level function Component.register(registry). :param component_spec: The ``ComponentSpec`` to register.
""" raise NotImplementedError def add_component( self, component: "Component", recurse: bool = True, force: bool = False ) -> None: component.to_registry(self, recurse=recurse, force=force) @abc.abstractmethod def add_repo_spec(self, repo_spec: RepoSpec) -> None: raise NotImplementedError @abc.abstractmethod def add_run_spec(self, run_spec: RunSpec) -> None: raise NotImplementedError @abc.abstractmethod def add_run_command_spec(self, run_command_spec: RunCommandSpec) -> None: raise NotImplementedError class InMemoryRegistry(Registry): """ A mutable in-memory registry. """ def __init__(self, input_dict: Dict = None, base_dir: str = None): super().__init__(base_dir) self._registry = input_dict if input_dict else {} if "components" not in self._registry.keys(): self._registry["components"] = {} if "repos" not in self._registry.keys(): self._registry["repos"] = {} if "runs" not in self._registry.keys(): self._registry["runs"] = {} if "run_commands" not in self._registry.keys(): self._registry["run_commands"] = {} if "registries" not in self._registry.keys(): self._registry["registries"] = [] def get_component_specs( self, filter_by_name: str = None, filter_by_version: str = None ) -> NestedComponentSpec: if filter_by_name or filter_by_version: try: components = {} for k, v in self._registry["components"].items(): candidate_id = ComponentIdentifier.from_str(k) passes_filter = True if filter_by_name and candidate_id.name != filter_by_name: passes_filter = False if ( filter_by_version and candidate_id.version != filter_by_version ): passes_filter = False if passes_filter: components[k] = v return components except KeyError: return {} return self._registry["components"] def get_repo_spec( self, repo_id: RepoIdentifier, flatten: bool = False ) -> "RepoSpec": return self._get_spec(repo_id, "repos", flatten) def get_run_spec( self, run_id: RunIdentifier, flatten: bool = False ) -> RunSpec: return self._get_spec(run_id, "runs", flatten) def get_run_command_spec( self, run_command_id: RunCommandIdentifier, flatten: bool = False ) -> RunCommandSpec: return self._get_spec(run_command_id, "run_commands", flatten) def _get_spec(self, identifier: str, spec_type: str, flatten: bool): """ Factor out common functionality for fetching specs from the internal represention of them. Because Components are special, (i.e., their identifiers can be versioned) they are handled differently. """ assert spec_type in ["repos", "runs", "run_commands"] spec = {identifier: self._registry[spec_type][identifier]} return flatten_spec(spec) if flatten else spec def get_registries(self) -> Sequence[Registry]: return self._registry["registries"] def add_component_spec(self, component_spec: NestedComponentSpec) -> None: self._registry["components"].update(component_spec) def add_repo_spec(self, repo_spec: RepoSpec) -> None: self._registry["repos"].update(repo_spec) def add_run_spec(self, run_spec: RunSpec) -> None: self._registry["runs"].update(run_spec) def add_run_command_spec(self, run_command_spec: RunCommandSpec) -> None: self._registry["run_commands"].update(run_command_spec) def to_dict(self) -> Dict: return self._registry class WebRegistry(Registry): """ A web-server backed Registry. """ def __init__(self, root_api_url: str, base_dir: str = None): self.root_api_url = root_api_url self.base_dir = ( base_dir if base_dir else "." ) # Used for file-backed Registry types. 
@staticmethod def _check_response(response): if not response.ok: print("failed response: ") print(response.content) content = json.loads(response.content) if type(content) == list: content = content[0] raise Exception(content) def get_component_specs( self, filter_by_name: str = None, filter_by_version: str = None ) -> NestedComponentSpec: url_filter_str = "" if filter_by_name: url_filter_str += f"name={filter_by_name}" if filter_by_version: if url_filter_str: url_filter_str += "&" url_filter_str += f"version={filter_by_version}" if url_filter_str: url_filter_str = f"?{url_filter_str}" component_url = f"{self.root_api_url}/components{url_filter_str}" print(f"trying {component_url}") component_response = requests.get(component_url) assert component_response.status_code == 200 json_results = json.loads(component_response.content) component_specs = {} for c_dict in json_results["results"]: identifier = f"{c_dict['name']}=={c_dict['version']}" component_specs[identifier] = { "repo":
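Taken together, the registry classes above support a simple round trip; the sketch below is illustrative only. The component name, version, spec fields, and file name are made-up placeholders (the "class_name"/"repo" keys mirror the ones used in the WebRegistry excerpt), and it assumes ComponentIdentifier parses the "name==version" format described in the docstrings.

```python
# Illustrative round trip through InMemoryRegistry (placeholder data).
registry = InMemoryRegistry()
registry.add_component_spec(
    {"my_agent==1.0.0": {"class_name": "MyAgent", "repo": "local_dir"}}
)

# Nested form: {"my_agent==1.0.0": {"class_name": ..., "repo": ...}}
nested = registry.get_component_spec("my_agent", "1.0.0")

# Flattened form merges name/version into the inner properties dict.
flat = registry.get_component_spec("my_agent", "1.0.0", flatten=True)

# Persist the whole registry (components, repos, runs, run_commands, registries).
registry.to_yaml("registry.yaml")
```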
in the template). """ return pulumi.get(self, "parameters") @parameters.setter def parameters(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): pulumi.set(self, "parameters", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: """ The project in which the resource belongs. If it is not provided, the provider project is used. """ return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter def region(self) -> Optional[pulumi.Input[str]]: """ The region in which the created job should run. """ return pulumi.get(self, "region") @region.setter def region(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "region", value) @property @pulumi.getter(name="serviceAccountEmail") def service_account_email(self) -> Optional[pulumi.Input[str]]: """ The Service Account email used to create the job. """ return pulumi.get(self, "service_account_email") @service_account_email.setter def service_account_email(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "service_account_email", value) @property @pulumi.getter def state(self) -> Optional[pulumi.Input[str]]: """ The current state of the resource, selected from the [JobState enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobState) """ return pulumi.get(self, "state") @state.setter def state(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "state", value) @property @pulumi.getter def subnetwork(self) -> Optional[pulumi.Input[str]]: """ The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK". If the [subnetwork is located in a Shared VPC network](https://cloud.google.com/dataflow/docs/guides/specifying-networks#shared), you must use the complete URL. For example `"googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET_NAME"` """ return pulumi.get(self, "subnetwork") @subnetwork.setter def subnetwork(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "subnetwork", value) @property @pulumi.getter(name="tempGcsLocation") def temp_gcs_location(self) -> Optional[pulumi.Input[str]]: """ A writeable location on GCS for the Dataflow job to dump its temporary data. """ return pulumi.get(self, "temp_gcs_location") @temp_gcs_location.setter def temp_gcs_location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "temp_gcs_location", value) @property @pulumi.getter(name="templateGcsPath") def template_gcs_path(self) -> Optional[pulumi.Input[str]]: """ The GCS path to the Dataflow job template. """ return pulumi.get(self, "template_gcs_path") @template_gcs_path.setter def template_gcs_path(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "template_gcs_path", value) @property @pulumi.getter(name="transformNameMapping") def transform_name_mapping(self) -> Optional[pulumi.Input[Mapping[str, Any]]]: """ Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job. This field is not used outside of update. 
""" return pulumi.get(self, "transform_name_mapping") @transform_name_mapping.setter def transform_name_mapping(self, value: Optional[pulumi.Input[Mapping[str, Any]]]): pulumi.set(self, "transform_name_mapping", value) @property @pulumi.getter def type(self) -> Optional[pulumi.Input[str]]: """ The type of this job, selected from the [JobType enum](https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs#Job.JobType) """ return pulumi.get(self, "type") @type.setter def type(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "type", value) @property @pulumi.getter def zone(self) -> Optional[pulumi.Input[str]]: """ The zone in which the created job should run. If it is not provided, the provider zone is used. """ return pulumi.get(self, "zone") @zone.setter def zone(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "zone", value) class Job(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, additional_experiments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, enable_streaming_engine: Optional[pulumi.Input[bool]] = None, ip_configuration: Optional[pulumi.Input[str]] = None, kms_key_name: Optional[pulumi.Input[str]] = None, labels: Optional[pulumi.Input[Mapping[str, Any]]] = None, machine_type: Optional[pulumi.Input[str]] = None, max_workers: Optional[pulumi.Input[int]] = None, name: Optional[pulumi.Input[str]] = None, network: Optional[pulumi.Input[str]] = None, on_delete: Optional[pulumi.Input[str]] = None, parameters: Optional[pulumi.Input[Mapping[str, Any]]] = None, project: Optional[pulumi.Input[str]] = None, region: Optional[pulumi.Input[str]] = None, service_account_email: Optional[pulumi.Input[str]] = None, subnetwork: Optional[pulumi.Input[str]] = None, temp_gcs_location: Optional[pulumi.Input[str]] = None, template_gcs_path: Optional[pulumi.Input[str]] = None, transform_name_mapping: Optional[pulumi.Input[Mapping[str, Any]]] = None, zone: Optional[pulumi.Input[str]] = None, __props__=None): """ Creates a job on Dataflow, which is an implementation of Apache Beam running on Google Compute Engine. For more information see the official documentation for [Beam](https://beam.apache.org) and [Dataflow](https://cloud.google.com/dataflow/). ## Example Usage ```python import pulumi import pulumi_gcp as gcp big_data_job = gcp.dataflow.Job("bigDataJob", parameters={ "baz": "qux", "foo": "bar", }, temp_gcs_location="gs://my-bucket/tmp_dir", template_gcs_path="gs://my-bucket/templates/template_file") ``` ### Streaming Job ```python import pulumi import pulumi_gcp as gcp topic = gcp.pubsub.Topic("topic") bucket1 = gcp.storage.Bucket("bucket1", location="US", force_destroy=True) bucket2 = gcp.storage.Bucket("bucket2", location="US", force_destroy=True) pubsub_stream = gcp.dataflow.Job("pubsubStream", template_gcs_path="gs://my-bucket/templates/template_file", temp_gcs_location="gs://my-bucket/tmp_dir", enable_streaming_engine=True, parameters={ "inputFilePattern": bucket1.url.apply(lambda url: f"{url}/*.json"), "outputTopic": topic.id, }, transform_name_mapping={ "name": "test_job", "env": "test", }, on_delete="cancel") ``` ## Note on "destroy" / "apply" There are many types of Dataflow jobs. Some Dataflow jobs run constantly, getting new data from (e.g.) a GCS bucket, and outputting data continuously. Some jobs process a set amount of data then terminate. All jobs can fail while running due to programming errors or other issues.
In this way, Dataflow jobs are different from most other Google resources. The Dataflow resource is considered 'existing' while it is in a nonterminal state. If it reaches a terminal state (e.g. 'FAILED', 'COMPLETE', 'CANCELLED'), it will be recreated on the next 'apply'. This is as expected for jobs which run continuously, but may surprise users who use this resource for other kinds of Dataflow jobs. A Dataflow job which is 'destroyed' may be "cancelled" or "drained". If "cancelled", the job terminates - any data written remains where it is, but no new data will be processed. If "drained", no new data will enter the pipeline, but any data currently in the pipeline will finish being processed. The default is "drain". When `on_delete` is set to `"drain"` in the configuration, you may experience a long wait for your `pulumi destroy` to complete. ## Import This resource does not support import. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Sequence[pulumi.Input[str]]] additional_experiments: List of experiments that should be used by the job. An example value is `["enable_stackdriver_agent_metrics"]`. :param pulumi.Input[bool] enable_streaming_engine: Enable/disable the use of [Streaming Engine](https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#streaming-engine) for the job. Note that Streaming Engine is enabled by default for pipelines developed against the Beam SDK for Python v2.21.0 or later when using Python 3. :param pulumi.Input[str] ip_configuration: The configuration for VM IPs. Options are `"WORKER_IP_PUBLIC"` or `"WORKER_IP_PRIVATE"`. :param pulumi.Input[str] kms_key_name: The name for the Cloud KMS key for the job. Key format is: `projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY` :param pulumi.Input[Mapping[str, Any]] labels: User labels to be specified for the job. Keys and values should follow the restrictions specified in the [labeling restrictions](https://cloud.google.com/compute/docs/labeling-resources#restrictions) page. **NOTE**: Google-provided Dataflow templates often provide default labels that begin with `goog-dataflow-provided`. Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply. :param pulumi.Input[str] machine_type: The machine type to use for the job. :param pulumi.Input[int] max_workers: The number of workers permitted to work on the job. More workers may improve processing speed at additional cost. :param pulumi.Input[str] name: A unique name for the resource, required by Dataflow. :param pulumi.Input[str] network: The network to which VMs will be assigned. If it is not provided, "default" will be used. :param pulumi.Input[str] on_delete: One of "drain" or "cancel". Specifies behavior of deletion during `pulumi destroy`. See above note. :param pulumi.Input[Mapping[str, Any]] parameters: Key/Value pairs to be passed to the Dataflow job (as used in the template). :param pulumi.Input[str] project: The project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] region: The region in which the created job should run. :param pulumi.Input[str] service_account_email: The Service Account email used to create the job. :param pulumi.Input[str] subnetwork: The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".
If the [subnetwork is located in a Shared VPC network](https://cloud.google.com/dataflow/docs/guides/specifying-networks#shared), you must use the complete URL. For example `"googleapis.com/compute/v1/projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET_NAME"` :param pulumi.Input[str] temp_gcs_location: A writeable location on GCS for the Dataflow job to dump its temporary data. :param pulumi.Input[str] template_gcs_path: The GCS path to the Dataflow job template. :param pulumi.Input[Mapping[str, Any]] transform_name_mapping: Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job. This field is not used outside of update. :param pulumi.Input[str] zone: The zone in which the created job should run. If it is not provided, the provider zone is used. """ ... @overload def __init__(__self__, resource_name: str, args: JobArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Creates a job on Dataflow, which is an implementation of Apache Beam running on Google Compute Engine. For more information see the official documentation for [Beam](https://beam.apache.org) and [Dataflow](https://cloud.google.com/dataflow/). ## Example Usage ```python import pulumi import pulumi_gcp as gcp big_data_job = gcp.dataflow.Job("bigDataJob", parameters={ "baz": "qux", "foo": "bar", }, temp_gcs_location="gs://my-bucket/tmp_dir", template_gcs_path="gs://my-bucket/templates/template_file") ``` ### Streaming Job ```python import pulumi import pulumi_gcp as gcp topic = gcp.pubsub.Topic("topic") bucket1 = gcp.storage.Bucket("bucket1", location="US", force_destroy=True) bucket2 = gcp.storage.Bucket("bucket2", location="US", force_destroy=True) pubsub_stream = gcp.dataflow.Job("pubsubStream", template_gcs_path="gs://my-bucket/templates/template_file", temp_gcs_location="gs://my-bucket/tmp_dir", enable_streaming_engine=True,
<reponame>rubik/pyg import re import os import sys import copy import glob import shutil import atexit import tarfile import zipfile import urllib2 import urlparse import functools import ConfigParser import pkg_resources import multiprocessing from pkgtools.pypi import PyPIJson from pkgtools.pkg import SDist, Develop, Installed from pyg.core import * from pyg.web import ReqManager, request, download from pyg.req import Requirement from pyg.locations import EASY_INSTALL, USER_SITE, BIN, BIN2, ALL_SITE_PACKAGES from pyg.utils import TempDir, ZipFile, File, name, ext, is_installed, is_windows, \ unpack, call_setup, print_output, installed_distributions from pyg.log import logger from pyg.parser.parser import init_parser __all__ = ['QSIZE', 'Installer', 'Uninstaller', 'Updater', 'Bundler'] # Unfortunately multiprocessing does not allow Values to be inside of a class, # so we have to keep it global... # To keep track of the checked packages QSIZE = multiprocessing.Value('i', 1) class Installer(object): def __init__(self, req): self.upgrading = False if is_installed(req): self.upgrading = True if not args_manager['install']['upgrade']: logger.info('{0} is already installed, use -U, --upgrade to upgrade', req) raise AlreadyInstalled ## We don't set args_manager['upgrade'] = False ## because we want to propagate it to dependencies logger.info('{0} is already installed, upgrading...', req) self.req = req @staticmethod def _install_deps(rs, name=None, updater=None): if not rs: return if args_manager['install']['no_deps']: logger.info('Skipping dependencies for {0}', name) logger.indent = 8 for req in rs: logger.info(req) logger.indent = 0 return logger.info('Installing dependencies...') dep_error = False newly_installed = [] for req in rs: if is_installed(req) and not args_manager['install']['upgrade_all']: logger.indent = 8 logger.info('{0} is already installed, use -A, --upgrade-all to upgrade dependencies', req) continue logger.indent = 0 logger.info('Installing {0} (from {1})', req, rs.comes_from) logger.indent = 8 try: Installer(req).install() newly_installed.append(req) except AlreadyInstalled: continue except InstallationError: dep_error = True logger.error('Error: {0} has not been installed correctly', req) continue logger.indent = 0 if dep_error: if updater: for req in newly_installed: updater.restore_files(req) updater.remove_files(rs.comes_from.name) updater.restore_files(rs.comes_from.name) logger.error("{0}'s dependencies installation failed", rs.comes_from.name, exc=InstallationError) else: logger.success('Finished installing dependencies for {0}', rs.comes_from) def install(self): try: r = Requirement(self.req) updater = FileManager() if self.upgrading: updater.remove_files(self.req) r.install() # Now let's install dependencies Installer._install_deps(r.reqset, r.name, updater) logger.success('{0} installed successfully', r.name) except (KeyboardInterrupt, Exception) as e: if logger.level == logger.DEBUG: raise msg = str(e) if isinstance(e, KeyboardInterrupt): logger.warn('Process interrupted...') elif isinstance(e, urllib2.HTTPError): logger.error('HTTP Error: {0}', msg[msg.find('HTTP Error') + 11:]) else: logger.warn('Error: An error occurred during the {0} of {1}: {2}', 'upgrading' if self.upgrading else 'installation', self.req, msg) if self.upgrading: logger.info('Restoring files...') updater.restore_files(self.req) else: logger.info('Removing broken files...') Uninstaller(self.req).uninstall() logger.error(msg, exc=InstallationError) @staticmethod def from_req_file(filepath): 
path = os.path.abspath(filepath) not_installed = set() parser = init_parser() with open(path) as f: logger.info('{0}:', path) for line in f: line = line.strip() if line.startswith('#'): logger.debug('debug: Comment found: {0}', line) continue try: logger.indent = 8 logger.info('Installing: {0}', line) logger.indent = 16 parser.dispatch(argv=['install'] + line.split()) except AlreadyInstalled: continue except InstallationError: not_installed.add(line) except SystemExit as e: if e.code != 0: logger.warn('W: {0} tried to raise SystemExit: skipping installation', line) else: logger.info('{0} tried to raise SystemExit, but the exit code was 0', line) if not_installed: logger.warn('These packages have not been installed:') logger.indent = 8 for req in not_installed: logger.warn(req) logger.indent = 0 raise InstallationError() @staticmethod def from_file(filepath, packname=None): packname = packname or os.path.basename(filepath).split('-')[0] reqset = ReqSet(Requirement(packname)) e = ext(filepath) path = os.path.abspath(filepath) if e in ('.tar.gz', '.tar.bz2', '.zip'): installer = Archive(open(path), e, packname, reqset) elif e in ('.pybundle', '.pyb'): installer = Bundle(filepath) elif e == '.egg': installer = Egg(open(path), path, reqset) elif e in ('.exe', '.msi') and is_windows(): installer = Binary(open(path), e, packname) else: if tarfile.is_tarfile(path): installer = Archive(open(path), None, packname, reqset) elif zipfile.is_zipfile(path): installer = Archive(open(path), '.zip', packname, reqset) else: logger.fatal('Error: Cannot install {0}: unknown filetype', packname, exc=InstallationError) installer.install() Installer._install_deps(reqset, packname) logger.success('{0} installed successfully', packname) @staticmethod def from_dir(path, name=None): name = name or os.path.basename(path) reqset = ReqSet(Requirement(name)) try: with TempDir() as tempdir: logger.info('Installing {0}', name) Dir(path, name, tempdir, reqset).install() except Exception as e: try: msg = e.args[0] except IndexError: msg = repr(e) logger.fatal('Error: {0}: cannot install the package', msg, exc=InstallationError) else: if reqset: Installer._install_deps(reqset) logger.success('{0} installed successfully', name) @staticmethod def from_url(url, packname=None): with TempDir() as tempdir: packname = packname or urlparse.urlsplit(url).path.split('/')[-1] if '#egg=' in url: url, packname = url.split('#egg=') path = os.path.join(tempdir, packname) download(url, 'Downloading {0}'.format(packname), False) with open(path, 'w') as f: f.write(request(url)) Installer.from_file(path, packname) class Uninstaller(object): def __init__(self, packname, yes=False, local=False): self.name = packname self.yes = yes self.local = local def _old_find_files(self): _un_re = re.compile(r'{0}(-(\d\.?)+(\-py\d\.\d)?\.(egg|egg\-info))?$'.format(self.name), re.I) _un2_re = re.compile(r'{0}(?:(\.py|\.pyc))'.format(self.name), re.I) _un3_re = re.compile(r'{0}.*\.so'.format(self.name), re.I) _uninstall_re = [_un_re, _un2_re, _un3_re] to_del = set() try: dist = pkg_resources.get_distribution(self.name) except pkg_resources.DistributionNotFound: logger.debug('debug: Distribution not found: {0}', self.name) ## Create a fake distribution ## In Python2.6 we can only use site.USER_SITE class FakeDist(object): def __init__(self, o): self._orig_o = o def __getattr__(self, a): if a == 'location': return USER_SITE elif a == 'egg_name': return (lambda *a: self._orig_o.name + '.egg') return (lambda *a: False) dist = FakeDist(self) pkg_loc = dist.location
glob_folder = False if pkg_loc in ALL_SITE_PACKAGES: # try to detect the real package location if dist.has_metadata('top_level.txt'): pkg_loc = os.path.join( pkg_loc, dist.get_metadata_lines('top_level.txt').next()) else: glob_folder = True # detect egg-info location _base_name = dist.egg_name().split('-') for n in range(len(_base_name) + 1): egg_info_dir = os.path.join( dist.location, '-'.join(_base_name[:-n if n else None]) + '.egg-info' ) if os.path.exists(egg_info_dir): try: for file in os.listdir(egg_info_dir): if any(u_re.match(file) for u_re in _uninstall_re): to_del.add(os.path.join(egg_info_dir, file)) to_del.add(egg_info_dir) # not a directory, like bzr-version.egg-info except OSError: logger.debug('debug: not a directory: {0}', egg_info_dir) continue break if glob_folder: # track individual files inside that folder try: for file in os.listdir(pkg_loc): if any(u_re.match(file) for u_re in _uninstall_re): to_del.add(os.path.join(pkg_loc, file)) except OSError: logger.debug('debug: OSError when trying to listdir {0}', pkg_loc) else: # specific folder (non site-packages) if os.path.isdir(pkg_loc): to_del.add(pkg_loc) # find the package's files in that folder if os.path.isdir(pkg_loc): for file in os.listdir(pkg_loc): if any(u_re.match(file) for u_re in _uninstall_re): to_del.add(os.path.join(pkg_loc, file)) else: # single file installation for ext in '.py .pyc .pyo'.split(): _p = pkg_loc + ext if os.path.exists(_p): to_del.add(_p) ## Checking for package's scripts... if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): for script in dist.metadata_listdir('scripts'): to_del.add(os.path.join(BIN, script)) ## If we are on Windows we have to remove *.bat files too if is_windows(): to_del.add(os.path.join(BIN, script) + '.bat') ## Very important! ## We want to remove console scripts too. if dist.has_metadata('entry_points.txt'): config = ConfigParser.ConfigParser() config.readfp(File(dist.get_metadata_lines('entry_points.txt'))) win32 = sys.platform == 'win32' if config.has_section('console_scripts'): for name, value in config.items('console_scripts'): n = os.path.join(BIN, name) ## Searches in the local path if not os.path.exists(n) and n.startswith('/usr/bin'): n = os.path.join('/usr/local/bin', name) ## Check existence before adding to the `to_del` set.
if os.path.exists(n): to_del.add(n) elif win32 and os.path.exists(n + '.exe'): to_del.add(n + '.exe') to_del.add(n + '.exe.manifest') to_del.add(n + '-script.py') ## Last check to ensure we don't remove site directories for path in copy.copy(to_del): if path in ALL_SITE_PACKAGES: to_del.remove(path) return to_del # this decorator filters out local paths # added to avoid code duplication into find_files() def _filter_locals(meth): def wrapper(self): to_del = meth(self) bin = (BIN,) if BIN2 is None else (BIN, BIN2) local = set(path for path in to_del if not path.startswith(tuple(ALL_SITE_PACKAGES) + bin)) return to_del.difference(local), local return wrapper @_filter_locals def find_files(self): try: files = Installed(self.name).installed_files() except: return self._old_find_files() to_del = files['lib'] for name in files['bin']: bin = os.path.join(BIN, name) if not os.path.exists(bin) and bin.startswith('/usr/bin'): bin = os.path.join('/usr/local/bin', name) if os.path.exists(bin): to_del.add(bin) if sys.platform == 'win32' and os.path.exists(bin + '.exe'): to_del.add(bin + '.exe') to_del.add(bin + '.exe.manifest') to_del.add(bin + '-script.py') return to_del def uninstall(self): def sort_paths(p): return set(sorted(p, key=lambda i: len(i.split(os.sep)), reverse=True)) path_re = re.compile(r'\./{0}-[\d\w\.]+-py\d\.\d.egg'.format(self.name), re.I) path_re2 = re.compile(r'\.{0}'.format(self.name), re.I) to_del, local = map(sort_paths, self.find_files()) if not to_del: if local and not self.local: logger.info('Local files (use -l, --local to delete):') logger.indent += 8 for d in local: logger.info(d) logger.indent -= 8 return else: logger.error('{0}: did not find any files to delete', self.name, exc=PygError) logger.info('Uninstalling {0}', self.name) logger.indent += 8 to_del = to_del.union(local if self.local else ()) for d in to_del: logger.info(d) if not self.local and local: logger.indent -= 8 logger.info('Local files (use -l, --local to delete):') logger.indent += 8 for d in local: logger.info(d) logger.indent -= 8 do_it = logger.ask('Proceed', bool=('remove files', 'cancel'), dont_ask=self.yes) if do_it: for d in to_del: try: logger.verbose('Deleting: {0}', d) shutil.rmtree(d) except OSError: ## It is not a directory try: os.remove(d) except OSError: logger.error('Error: cannot delete {0}', d) logger.verbose('Removing egg path from easy_install.pth...') with open(EASY_INSTALL) as f: lines = f.readlines()
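For reference, here is a minimal sketch of how the Uninstaller above is driven; the package name is a placeholder. Note that find_files is wrapped by the _filter_locals decorator, so it returns a pair of path sets rather than a single set.

```python
# Placeholder package name; uninstall() prompts before deleting
# anything unless yes=True is passed to the constructor.
u = Uninstaller('example-package', yes=False, local=False)

# (paths under site-packages/bin dirs, local-only paths filtered out)
site_files, local_files = u.find_files()

u.uninstall()  # lists the files, asks for confirmation, then deletes
```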
<filename>code/main.py # import basic libs import os,sys import six import math import time import shutil import random import datetime import warnings import argparse import numpy as np import matplotlib.pyplot as plt import hiddenlayer as hl from collections import OrderedDict # import pytorch libs import torch import torch.nn as nn import torch.nn.parallel import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim as optim import torch.multiprocessing as mp import torch.utils.data import torch.utils.data.distributed import torchvision.transforms as transforms import torchvision.datasets as datasets #import torchvision.models as models # import customized libs #from model import * from utils import * import model as models model_names = sorted(name for name in models.__dict__ if name.islower() and not name.startswith("__") and callable(models.__dict__[name])) ''' class MyDataParallel(nn.DataParallel): def __getattr__(self, name): print(self) print(type(self)) m=getattr(self,'module') print(m) raise ValueError('data parrallel') return getattr(module,name) ''' best_acc1 = 0 best_epo1 = 0 parallel = False def main(argv): parser = argparse.ArgumentParser(description='PyTorch Classification Training') # Required arguments: input and output files. parser.add_argument( "data_root", help = "Must give the dataset root to do the training (absolute path)." ) parser.add_argument( "config_file", help = "Must give the config file of this experiment." ) parser.add_argument( "output_dir", help = "Must give the path to save output logfile and other data." ) parser.add_argument( "--gpu", default=None, type=int, help = "Set the only visible gpu id for single gpu running." ) parser.add_argument( "--gpu_env", default=None, type=str, help = "Set total visible gpus environment." ) parser.add_argument('--multiprocessing-distributed', action='store_true', help='Use multi-processing distributed training to launch ' 'N processes per node, which has N GPUs. This is the ' 'fastest way to use PyTorch for either single node or ' 'multi node data parallel training' ) ''' parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', choices=model_names, help='model architecture: ' + ' | '.join(model_names) + ' (default: resnet18)') parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('-p', '--print-freq', default=10, type=int, metavar='N', help='print frequency (default: 10)') parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set') parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model') parser.add_argument('--world-size', default=-1, type=int, help='number of nodes for distributed training') parser.add_argument('--rank', default=-1, type=int, help='node rank for distributed training') parser.add_argument('--dist-url', default='tcp://172.16.17.32:23456', type=str, help='url used to set up distributed training') parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend') parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. 
') parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.') parser.add_argument('--multiprocessing-distributed', action='store_true', help='Use multi-processing distributed training to launch ' 'N processes per node, which has N GPUs. This is the ' 'fastest way to use PyTorch for either single node or ' 'multi node data parallel training') ''' #-----------------------LOAD AND CHECK PARAMETER---------------------# args = parser.parse_args() print("cfg_file name:", args.config_file) cfg = create_default_cfg() update_cfg(cfg, args.config_file) cfg.data_root = args.data_root cfg.output_dir = args.output_dir cfg.gpu = args.gpu cfg.env = args.gpu_env cfg.multiprocessing_distributed = args.multiprocessing_distributed print_cfg(cfg) net_cfg = cfg.NETWORK train_cfg = cfg.TRAIN data_cfg = cfg.DATA log_cfg = cfg.LOG # check learning rate adjustment parameters are valid if isinstance(train_cfg.learning_rate, list) and isinstance(train_cfg.decay_step, list): if len(train_cfg.learning_rate) == len(train_cfg.decay_step): if len(train_cfg.learning_rate) == 1: warnings.warn('You are using only one learning rate during the training') else: print('Using learning rate adjustment') else: raise ValueError('The lengths of learning rate and decay step are not equal') else: raise ValueError('The learning rate or decay step is not a list') # check random seed and record if cfg.random_seed is not None: random.seed(cfg.random_seed) torch.manual_seed(cfg.random_seed) cudnn.deterministic = True warnings.warn('You have chosen to seed training. ' 'This will turn on the CUDNN deterministic setting, ' 'which can slow down your training considerably! ' 'You may see unexpected behavior when restarting ' 'from checkpoints.') # check and set whole system if args.gpu_env is not None: os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_env print("You have chosen {} specific GPUs:".format(torch.cuda.device_count())) print(args.gpu_env) if args.gpu is not None: # set this by main_worker below warnings.warn('You have chosen a specific GPU. 
This will completely ' 'disable data parallelism.') else: pass if cfg.dist_url == "env://" and cfg.world_size == -1: cfg.world_size = int(os.environ["WORLD_SIZE"]) cfg.distributed = cfg.world_size > 1 or cfg.multiprocessing_distributed ngpus_per_node = torch.cuda.device_count() if cfg.multiprocessing_distributed: # Since we have ngpus_per_node processes per node, the total world_size # needs to be adjusted accordingly cfg.world_size = ngpus_per_node * cfg.world_size # Use torch.multiprocessing.spawn to launch distributed processes: the # main_worker process function mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, cfg)) else: # Simply call main_worker function main_worker(args.gpu, ngpus_per_node, cfg) def main_worker(gpu, ngpus_per_node, cfg): #-----------------------SET PARAMETERS---------------------# global best_acc1 global best_epo1 global parallel cfg.gpu = gpu net_cfg = cfg.NETWORK train_cfg = cfg.TRAIN data_cfg = cfg.DATA log_cfg = cfg.LOG # record learning rate parameters LR_start = train_cfg.learning_rate[0] START_epoch = 0 #--------------CREATE MODEL AND SET PARALLEL TRAINING------------------# if net_cfg.pretrained: print("=> using pre-trained model '{}-{}'".format(net_cfg.arch, net_cfg.depth)) #model = resnet(net_cfg.depth, net_cfg.num_classes, net_cfg.q_cfg) model = models.__dict__[net_cfg.arch](depth = net_cfg.depth, num_classes=net_cfg.num_classes, q_cfg = net_cfg.q_cfg) else: print("=> creating model '{}-{}'".format(net_cfg.arch, net_cfg.depth)) #model = resnet(net_cfg.depth, net_cfg.num_classes, net_cfg.q_cfg) model = models.__dict__[net_cfg.arch](depth = net_cfg.depth, num_classes=net_cfg.num_classes, q_cfg = net_cfg.q_cfg) #print("debug gpu {}".format(cfg.gpu)) ''' if cfg.gpu is not None: print("Use GPU: {} for training".format(cfg.gpu)) else: print("Use GPU: {} for training".format(cfg.gpu)) ''' if cfg.distributed: if cfg.dist_url == "env://" and cfg.rank == -1: cfg.rank = int(os.environ["RANK"]) if cfg.multiprocessing_distributed: # For multiprocessing distributed training, rank needs to be the # global rank among all the processes cfg.rank = cfg.rank * ngpus_per_node + gpu dist.init_process_group(backend=cfg.dist_backend, init_method=cfg.dist_url, world_size=cfg.world_size, rank=cfg.rank) if cfg.distributed: # For multiprocessing distributed, DistributedDataParallel constructor # should always set the single device scope, otherwise, # DistributedDataParallel will use all available devices. 
if cfg.gpu is not None: torch.cuda.set_device(cfg.gpu) model.cuda(cfg.gpu) # When using a single GPU per process and per # DistributedDataParallel, we need to divide the batch size # ourselves based on the total number of GPUs we have data_cfg.batch_size = int(data_cfg.batch_size / ngpus_per_node) data_cfg.num_works = int(data_cfg.num_works / ngpus_per_node) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[cfg.gpu]) else: model.cuda() # DistributedDataParallel will divide and allocate batch_size to all # available GPUs if device_ids are not set model = torch.nn.parallel.DistributedDataParallel(model) elif cfg.gpu is not None: torch.cuda.set_device(cfg.gpu) model = model.cuda(cfg.gpu) else: # DataParallel will divide and allocate batch_size to all available GPUs print('DataParallel now.') #if net_cfg.arch.startswith('alexnet'): #or net_cfg.arch.startswith('vgg'): if net_cfg.arch.startswith('alexnet') or net_cfg.arch.startswith('vgg'): model.features = torch.nn.DataParallel(model.features) model.cuda() print('DataParallel done for features.') else: #model = MyDataParallel(model).cuda() parallel = True model = torch.nn.DataParallel(model).cuda() print('DataParallel done.') # measure the model print('Start summary') if data_cfg.dataset=='ILSVRC2012_img': df = torch_summarize_df(input_size=(3,224,224), model=model) else: df = torch_summarize_df(input_size=(3,32,32), model=model) print(df) print('Total params: %.3f M' % (sum(p.numel() for p in model.parameters())/1000000.0)) #----------------------DEFINE OPTIMIZER---------------------------# print("===start defining optimizer===") criterion = nn.CrossEntropyLoss().cuda(cfg.gpu) opt_Adam = optim.Adam( model.parameters(), lr = LR_start, betas=(0.9, 0.999), weight_decay = 1e-8) opt_SGD = optim.SGD ( model.parameters(), lr = LR_start, weight_decay=train_cfg.weight_decay) opt_SGDm = optim.SGD ( model.parameters(), lr = LR_start, momentum=train_cfg.momentum, weight_decay=train_cfg.weight_decay) opt_RMS = optim.RMSprop(model.parameters(), lr = LR_start, weight_decay=5e-4) if train_cfg.optimizer == "Adam": print(" Use optimizer Adam.") optimizer = opt_Adam elif train_cfg.optimizer == "SGDm": print(" Use optimizer SGDm.") optimizer = opt_SGDm else: print(" Use optimizer SGD.") optimizer = opt_SGD #----------------OPTIONALLY RESUME FROM A CHECKPOINT----------------# if train_cfg.resume is not None: if os.path.isfile(train_cfg.resume): print("=> loading checkpoint '{}'".format(train_cfg.resume)) checkpoint = torch.load(train_cfg.resume) START_epoch = checkpoint['epoch'] parallel = checkpoint['parallel'] best_acc1 = checkpoint['best_acc1'] best_epo1 = checkpoint['best_epo1'] print("=> checkpoint best '{}' @ {}".format(best_acc1,best_epo1)) if cfg.gpu is not None: # best_acc1 may be from a checkpoint from a different GPU best_acc1 = best_acc1.to(cfg.gpu) model.load_state_dict(checkpoint['state_dict']) #print('debug:',checkpoint['state_dict']) # there will be some bugs if we load the optimizer when resuming with a # different training strategy, so we create the optimizer on resume #optimizer.load_state_dict(checkpoint['optimizer']) print("=> loaded checkpoint '{}' (epoch {})" .format(train_cfg.resume, checkpoint['epoch'])) else: print("=> no checkpoint found at '{}'".format(train_cfg.resume)) # quantize if START_epoch from checkpoint is larger than float_epoch if parallel and net_cfg.quantize and net_cfg.float_epoch < START_epoch: model.module.enable_quantize() elif net_cfg.quantize and net_cfg.float_epoch < START_epoch: model.enable_quantize()
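The earlier config check requires learning_rate and decay_step to be equal-length lists; a piecewise-constant schedule like the hypothetical helper below is the usual way such paired lists are consumed. This helper is a sketch (its name and the ascending-steps assumption are mine), not code from this file.

```python
def adjust_learning_rate(optimizer, epoch, learning_rates, decay_steps):
    """Pick the learning rate for `epoch` from paired lists.

    Example: learning_rates=[0.1, 0.01, 0.001], decay_steps=[0, 80, 120]
    gives lr=0.1 for epochs 0-79, 0.01 for 80-119, 0.001 afterwards.
    Assumes decay_steps is sorted in ascending order.
    """
    lr = learning_rates[0]
    for rate, step in zip(learning_rates, decay_steps):
        if epoch >= step:
            lr = rate
    # Standard PyTorch idiom for updating the optimizer's learning rate.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
```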
cudnn.benchmark = True #print(model) #print(torch_summarize(model)) #-----------------------DATA LOADING--------------------------# print("===start loading data===") traindir = os.path.join(cfg.data_root, data_cfg.dataset+'_train') valdir = os.path.join(cfg.data_root, data_cfg.dataset+'_val') #normalize = transforms.Normalize(mean=data_cfg.pixel_means, # std=data_cfg.pixel_stds) if data_cfg.dataset=='ILSVRC2012_img': normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) train_dataset = datasets.ImageFolder( traindir, transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ])) val_dataset = datasets.ImageFolder( valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize, ])) else: # cifar 10 normalize = transforms.Normalize(mean=[0.4914, 0.4824, 0.4465], std=[0.2470, 0.2435, 0.2616]) train_dataset = datasets.CIFAR10( cfg.data_root+'cifar10', train=True, download=True, transform=transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ])) val_dataset = datasets.CIFAR10( cfg.data_root+'cifar10', train=False, download=False, transform=transforms.Compose([ transforms.ToTensor(), normalize, ])) if cfg.distributed: train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) else: train_sampler = None
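In the standard PyTorch distributed recipe, the sampler selection above is followed by DataLoader construction along these lines. The sketch below continues the excerpt's own names (data_cfg.batch_size, data_cfg.num_works) but is not shown in this file; note that shuffle must be disabled whenever a DistributedSampler is supplied.

```python
# Sketch of the loader construction that typically follows the sampler setup.
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=data_cfg.batch_size,
    shuffle=(train_sampler is None),   # the DistributedSampler shuffles instead
    num_workers=data_cfg.num_works,    # field name taken from the config above
    pin_memory=True, sampler=train_sampler)

val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=data_cfg.batch_size, shuffle=False,
    num_workers=data_cfg.num_works, pin_memory=True)
```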
stop:0.567164 rgba(78, 59, 58, 30));\n" "") self.CryptoDragDrop.setObjectName("CryptoDragDrop") self.verticalLayout_16 = QtWidgets.QVBoxLayout(self.CryptoDragDrop) self.verticalLayout_16.setContentsMargins(0, 0, 0, 0) self.verticalLayout_16.setSpacing(0) self.verticalLayout_16.setObjectName("verticalLayout_16") self.CryptoDragDropLabel = QtWidgets.QLabel(self.CryptoDragDrop) font = QtGui.QFont() font.setFamily("Cabin Sketch") font.setPointSize(30) font.setBold(True) font.setItalic(True) font.setWeight(75) font.setKerning(True) font.setStyleStrategy(QtGui.QFont.PreferAntialias) self.CryptoDragDropLabel.setFont(font) self.CryptoDragDropLabel.setStyleSheet("border: 0px;\n" "") self.CryptoDragDropLabel.setAlignment(QtCore.Qt.AlignCenter) self.CryptoDragDropLabel.setObjectName("CryptoDragDropLabel") self.verticalLayout_16.addWidget(self.CryptoDragDropLabel) self.horizontalLayout_8.addWidget(self.CryptoDragDrop) self.verticalLayout_15.addWidget(self.CryptoDragDropFrame) self.CryptoFrameMiddle = QtWidgets.QFrame(self.CryptoPage) self.CryptoFrameMiddle.setStyleSheet("background-color: rgba(0, 0, 0, 0);") self.CryptoFrameMiddle.setFrameShape(QtWidgets.QFrame.NoFrame) self.CryptoFrameMiddle.setFrameShadow(QtWidgets.QFrame.Raised) self.CryptoFrameMiddle.setObjectName("CryptoFrameMiddle") self.verticalLayout_17 = QtWidgets.QVBoxLayout(self.CryptoFrameMiddle) self.verticalLayout_17.setContentsMargins(0, 0, 0, 0) self.verticalLayout_17.setSpacing(0) self.verticalLayout_17.setObjectName("verticalLayout_17") self.CryptoSaveLoc = QtWidgets.QFrame(self.CryptoFrameMiddle) self.CryptoSaveLoc.setFrameShape(QtWidgets.QFrame.NoFrame) self.CryptoSaveLoc.setFrameShadow(QtWidgets.QFrame.Raised) self.CryptoSaveLoc.setObjectName("CryptoSaveLoc") self.verticalLayout_18 = QtWidgets.QVBoxLayout(self.CryptoSaveLoc) self.verticalLayout_18.setContentsMargins(3, 4, 3, 4) self.verticalLayout_18.setSpacing(2) self.verticalLayout_18.setObjectName("verticalLayout_18") self.CryptoSaveLocBrowse = DragDropWidget(self.CryptoSaveLoc) self.CryptoSaveLocBrowse.setStyleSheet("border: 2px dashed #aaa;\n" "color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(158, 7, 23, 180), stop:1 rgba(255, 130, 20, 200));\n" "background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(74, 74, 74, 42), stop:0.567164 rgba(78, 59, 58, 30));\n" "") self.CryptoSaveLocBrowse.setObjectName("CryptoSaveLocBrowse") self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.CryptoSaveLocBrowse) self.horizontalLayout_10.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_10.setSpacing(0) self.horizontalLayout_10.setObjectName("horizontalLayout_10") self.CryptoSaveLocBrowseL = QtWidgets.QLabel(self.CryptoSaveLocBrowse) self.CryptoSaveLocBrowseL.setMinimumSize(QtCore.QSize(0, 75)) font = QtGui.QFont() font.setFamily("Cabin Sketch") font.setPointSize(30) font.setBold(True) font.setItalic(True) font.setWeight(75) self.CryptoSaveLocBrowseL.setFont(font) self.CryptoSaveLocBrowseL.setStyleSheet("border: 0px;\n" "") self.CryptoSaveLocBrowseL.setAlignment(QtCore.Qt.AlignCenter) self.CryptoSaveLocBrowseL.setObjectName("CryptoSaveLocBrowseL") self.horizontalLayout_10.addWidget(self.CryptoSaveLocBrowseL) self.verticalLayout_18.addWidget(self.CryptoSaveLocBrowse) self.frame = QtWidgets.QFrame(self.CryptoSaveLoc) self.frame.setMaximumSize(QtCore.QSize(16777215, 38)) self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame.setFrameShadow(QtWidgets.QFrame.Raised) self.frame.setObjectName("frame") 
self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.frame) self.horizontalLayout_9.setContentsMargins(5, 0, 8, 0) self.horizontalLayout_9.setSpacing(7) self.horizontalLayout_9.setObjectName("horizontalLayout_9") self.Password = QtWidgets.QLineEdit(self.frame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.Password.sizePolicy().hasHeightForWidth()) self.Password.setSizePolicy(sizePolicy) self.Password.setMinimumSize(QtCore.QSize(0, 30)) self.Password.setMaximumSize(QtCore.QSize(16777215, 28)) font = QtGui.QFont() font.setFamily("Cabin Sketch") font.setBold(True) font.setItalic(True) font.setWeight(75) self.Password.setFont(font) self.Password.setStyleSheet("color: rgb(255, 76, 16);\n" "background-color: qlineargradient(spread:reflect, x1:1, y1:0.579, x2:0, y2:0.477273, stop:0.268657 rgba(62, 59, 59, 246), stop:1 rgba(211, 113, 27, 55));") self.Password.setText("") self.Password.setObjectName("Password") self.horizontalLayout_9.addWidget(self.Password) self.verticalLayout_18.addWidget(self.frame) self.verticalLayout_17.addWidget(self.CryptoSaveLoc) self.verticalLayout_15.addWidget(self.CryptoFrameMiddle) self.CryptoFrameBottom = QtWidgets.QFrame(self.CryptoPage) self.CryptoFrameBottom.setStyleSheet("background-color: rgba(0, 0, 0, 0);") self.CryptoFrameBottom.setFrameShape(QtWidgets.QFrame.NoFrame) self.CryptoFrameBottom.setFrameShadow(QtWidgets.QFrame.Raised) self.CryptoFrameBottom.setObjectName("CryptoFrameBottom") self.verticalLayout_19 = QtWidgets.QVBoxLayout(self.CryptoFrameBottom) self.verticalLayout_19.setContentsMargins(0, 0, 0, 0) self.verticalLayout_19.setSpacing(0) self.verticalLayout_19.setObjectName("verticalLayout_19") self.SelectAlgo = QtWidgets.QFrame(self.CryptoFrameBottom) self.SelectAlgo.setMinimumSize(QtCore.QSize(0, 30)) self.SelectAlgo.setMaximumSize(QtCore.QSize(1000, 30)) self.SelectAlgo.setFrameShape(QtWidgets.QFrame.NoFrame) self.SelectAlgo.setFrameShadow(QtWidgets.QFrame.Raised) self.SelectAlgo.setObjectName("SelectAlgo") self.horizontalLayout_11 = QtWidgets.QHBoxLayout(self.SelectAlgo) self.horizontalLayout_11.setContentsMargins(3, 0, 7, 0) self.horizontalLayout_11.setSpacing(0) self.horizontalLayout_11.setObjectName("horizontalLayout_11") self.AlgoSelect = QtWidgets.QComboBox(self.SelectAlgo) self.AlgoSelect.setMinimumSize(QtCore.QSize(0, 25)) font = QtGui.QFont() font.setFamily("Cabin Sketch") font.setPointSize(6) font.setBold(True) font.setItalic(True) font.setWeight(75) self.AlgoSelect.setFont(font) self.AlgoSelect.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu) self.AlgoSelect.setLayoutDirection(QtCore.Qt.LeftToRight) self.AlgoSelect.setStyleSheet("border: 2px solid #aaa;\n" "color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(158, 7, 23, 180), stop:1 rgba(255, 130, 20, 200));\n" "background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(74, 74, 74, 42), stop:0.567164 rgba(78, 59, 58, 30));\n" "") self.AlgoSelect.setCurrentText("") self.AlgoSelect.setObjectName("AlgoSelect") self.horizontalLayout_11.addWidget(self.AlgoSelect) self.verticalLayout_19.addWidget(self.SelectAlgo) self.CryptoButtons = QtWidgets.QFrame(self.CryptoFrameBottom) self.CryptoButtons.setFrameShape(QtWidgets.QFrame.NoFrame) self.CryptoButtons.setFrameShadow(QtWidgets.QFrame.Raised) self.CryptoButtons.setObjectName("CryptoButtons") self.horizontalLayout_12 = 
QtWidgets.QHBoxLayout(self.CryptoButtons) self.horizontalLayout_12.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_12.setSpacing(0) self.horizontalLayout_12.setObjectName("horizontalLayout_12") self.Encrypt = QtWidgets.QPushButton(self.CryptoButtons) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.Encrypt.sizePolicy().hasHeightForWidth()) self.Encrypt.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setFamily("Feast of Flesh BB") font.setPointSize(43) font.setBold(True) font.setItalic(True) font.setWeight(75) self.Encrypt.setFont(font) self.Encrypt.setStyleSheet("QPushButton {\n" " color: rgba(255, 255, 255, 150);\n" " background-color: rgba(0, 0, 0, 0);\n" " border: 0px solid;\n" "}\n" "\n" "QPushButton::hover {\n" " \n" " color: rgba(255, 255, 255, 195);\n" " border: 0px solid;\n" " background-color: qlineargradient(spread:pad, x1:0.46305, y1:0.261, x2:0.478, y2:1, stop:0.268657 rgba(62, 59, 59, 246), stop:1 rgba(211, 113, 27, 125));\n" "}") self.Encrypt.setObjectName("Encrypt") self.horizontalLayout_12.addWidget(self.Encrypt) self.Decrypt = QtWidgets.QPushButton(self.CryptoButtons) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.Decrypt.sizePolicy().hasHeightForWidth()) self.Decrypt.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setFamily("Feast of Flesh BB") font.setPointSize(43) font.setBold(True) font.setItalic(True) font.setWeight(75) self.Decrypt.setFont(font) self.Decrypt.setStyleSheet("QPushButton {\n" " color: rgba(255, 255, 255, 150);\n" " background-color: rgba(0, 0, 0, 0);\n" " border: 0px solid;\n" "}\n" "\n" "QPushButton::hover {\n" " \n" " color: rgba(255, 255, 255, 195);\n" " border: 0px solid;\n" " background-color: qlineargradient(spread:pad, x1:0.46305, y1:0.261, x2:0.478, y2:1, stop:0.268657 rgba(62, 59, 59, 246), stop:1 rgba(211, 113, 27, 125));\n" "}") self.Decrypt.setObjectName("Decrypt") self.horizontalLayout_12.addWidget(self.Decrypt) self.verticalLayout_19.addWidget(self.CryptoButtons) self.verticalLayout_15.addWidget(self.CryptoFrameBottom) self.Stack.addWidget(self.CryptoPage) self.TransferPage = QtWidgets.QWidget() self.TransferPage.setObjectName("TransferPage") self.verticalLayout_12 = QtWidgets.QVBoxLayout(self.TransferPage) self.verticalLayout_12.setContentsMargins(0, 0, 0, 0) self.verticalLayout_12.setSpacing(0) self.verticalLayout_12.setObjectName("verticalLayout_12") self.AddFileTransfer = QtWidgets.QFrame(self.TransferPage) self.AddFileTransfer.setStyleSheet("background-color: rgba(0, 0, 0, 0);\n" "") self.AddFileTransfer.setFrameShape(QtWidgets.QFrame.StyledPanel) self.AddFileTransfer.setFrameShadow(QtWidgets.QFrame.Raised) self.AddFileTransfer.setObjectName("AddFileTransfer") self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.AddFileTransfer) self.horizontalLayout_5.setContentsMargins(4, 2, 4, 0) self.horizontalLayout_5.setObjectName("horizontalLayout_5") self.DragDropTransfer = DragDropWidget(self.AddFileTransfer) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.DragDropTransfer.sizePolicy().hasHeightForWidth()) self.DragDropTransfer.setSizePolicy(sizePolicy) 
self.DragDropTransfer.setMinimumSize(QtCore.QSize(990, 176)) self.DragDropTransfer.setAcceptDrops(True) self.DragDropTransfer.setObjectName("DragDropTransfer") self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.DragDropTransfer) self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_6.setSpacing(0) self.horizontalLayout_6.setObjectName("horizontalLayout_6") self.DragDropLabelTransfer = QtWidgets.QLabel(self.DragDropTransfer) self.DragDropLabelTransfer.setMaximumSize(QtCore.QSize(992, 165)) font = QtGui.QFont() font.setFamily("DEADLY KILLERS DEMO") font.setPointSize(30) font.setBold(True) font.setItalic(True) font.setWeight(75) self.DragDropLabelTransfer.setFont(font) self.DragDropLabelTransfer.setStyleSheet("border: 2px dashed #aaa;\n" "color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(158, 7, 23, 180), stop:1 rgba(255, 130, 20, 200));\n" "background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(74, 74, 74, 42), stop:0.567164 rgba(78, 59, 58, 30));\n" "") self.DragDropLabelTransfer.setAlignment(QtCore.Qt.AlignCenter) self.DragDropLabelTransfer.setObjectName("DragDropLabelTransfer") self.horizontalLayout_6.addWidget(self.DragDropLabelTransfer) self.horizontalLayout_5.addWidget(self.DragDropTransfer) self.verticalLayout_12.addWidget(self.AddFileTransfer) self.Url_ButtonTransfer = QtWidgets.QFrame(self.TransferPage) self.Url_ButtonTransfer.setStyleSheet("background-color: rgba(0, 0, 0, 0);\n" "") self.Url_ButtonTransfer.setFrameShape(QtWidgets.QFrame.NoFrame) self.Url_ButtonTransfer.setFrameShadow(QtWidgets.QFrame.Raised) self.Url_ButtonTransfer.setObjectName("Url_ButtonTransfer") self.verticalLayout_13 = QtWidgets.QVBoxLayout(self.Url_ButtonTransfer) self.verticalLayout_13.setContentsMargins(0, 0, 0, 0) self.verticalLayout_13.setSpacing(0) self.verticalLayout_13.setObjectName("verticalLayout_13") self.UrlTransfer = QtWidgets.QFrame(self.Url_ButtonTransfer) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.UrlTransfer.sizePolicy().hasHeightForWidth()) self.UrlTransfer.setSizePolicy(sizePolicy) self.UrlTransfer.setMaximumSize(QtCore.QSize(1000, 46)) self.UrlTransfer.setFrameShape(QtWidgets.QFrame.StyledPanel) self.UrlTransfer.setFrameShadow(QtWidgets.QFrame.Raised) self.UrlTransfer.setObjectName("UrlTransfer") self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.UrlTransfer) self.horizontalLayout_7.setObjectName("horizontalLayout_7") self.UrlLabel = QtWidgets.QLabel(self.UrlTransfer) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.UrlLabel.sizePolicy().hasHeightForWidth()) self.UrlLabel.setSizePolicy(sizePolicy) self.UrlLabel.setMinimumSize(QtCore.QSize(0, 31)) self.UrlLabel.setMaximumSize(QtCore.QSize(38, 50)) font = QtGui.QFont() font.setFamily("DEADLY KILLERS DEMO") font.setPointSize(12) font.setBold(True) font.setWeight(75) self.UrlLabel.setFont(font) self.UrlLabel.setStyleSheet("color: rgb(211, 113, 27);") self.UrlLabel.setObjectName("UrlLabel") self.horizontalLayout_7.addWidget(self.UrlLabel) self.Url = QtWidgets.QLineEdit(self.UrlTransfer) self.Url.setMinimumSize(QtCore.QSize(0, 32)) self.Url.setMaximumSize(QtCore.QSize(16777215, 16777215)) font = QtGui.QFont() font.setFamily("Courgette") font.setPointSize(11) 
self.Url.setFont(font) self.Url.setStyleSheet("color: rgb(255, 76, 16);\n" "background-color: qlineargradient(spread:reflect, x1:1, y1:0.579, x2:0, y2:0.477273, stop:0.268657 rgba(62, 59, 59, 246), stop:1 rgba(211, 113, 27, 55));") self.Url.setInputMask("") self.Url.setObjectName("Url") self.horizontalLayout_7.addWidget(self.Url) self.verticalLayout_13.addWidget(self.UrlTransfer) self.ButtonTransferFrame = QtWidgets.QFrame(self.Url_ButtonTransfer) font = QtGui.QFont() font.setPointSize(11) self.ButtonTransferFrame.setFont(font) self.ButtonTransferFrame.setFrameShape(QtWidgets.QFrame.NoFrame) self.ButtonTransferFrame.setFrameShadow(QtWidgets.QFrame.Raised) self.ButtonTransferFrame.setObjectName("ButtonTransferFrame") self.verticalLayout_14 = QtWidgets.QVBoxLayout(self.ButtonTransferFrame) self.verticalLayout_14.setContentsMargins(0, 0, 0, 0) self.verticalLayout_14.setSpacing(0) self.verticalLayout_14.setObjectName("verticalLayout_14") self.ButtonTransfer = QtWidgets.QPushButton(self.ButtonTransferFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.ButtonTransfer.sizePolicy().hasHeightForWidth()) self.ButtonTransfer.setSizePolicy(sizePolicy) self.ButtonTransfer.setMinimumSize(QtCore.QSize(998, 0)) self.ButtonTransfer.setMaximumSize(QtCore.QSize(998, 16777215)) font = QtGui.QFont() font.setFamily("Feast of Flesh BB") font.setPointSize(74) font.setBold(True) font.setItalic(True) font.setWeight(75) self.ButtonTransfer.setFont(font) self.ButtonTransfer.setStyleSheet("QPushButton {\n" " color: rgba(255, 255, 255, 150);\n" " background-color: rgba(0, 0, 0, 0);\n" " border: 0px solid;\n" "}\n" "\n" "QPushButton::hover {\n" " \n" " color: rgba(255, 255, 255, 195);\n" " border: 0px solid;\n" " background-color: qlineargradient(spread:pad, x1:0.46305, y1:0.261, x2:0.478, y2:1, stop:0.268657 rgba(62, 59, 59, 246), stop:1 rgba(211, 113, 27, 125));\n" "}") self.ButtonTransfer.setObjectName("ButtonTransfer") self.verticalLayout_14.addWidget(self.ButtonTransfer) self.verticalLayout_13.addWidget(self.ButtonTransferFrame) self.verticalLayout_12.addWidget(self.Url_ButtonTransfer) self.Stack.addWidget(self.TransferPage) self.StegoPage = QtWidgets.QWidget() self.StegoPage.setObjectName("StegoPage") self.verticalLayout_20 = QtWidgets.QVBoxLayout(self.StegoPage) self.verticalLayout_20.setContentsMargins(0, 0, 0, 0) self.verticalLayout_20.setSpacing(0) self.verticalLayout_20.setObjectName("verticalLayout_20") self.StegoPagesFrame = QtWidgets.QFrame(self.StegoPage) self.StegoPagesFrame.setStyleSheet("background-color: rgba(0, 0, 0, 0);\n" "") self.StegoPagesFrame.setFrameShape(QtWidgets.QFrame.NoFrame) self.StegoPagesFrame.setFrameShadow(QtWidgets.QFrame.Raised) self.StegoPagesFrame.setObjectName("StegoPagesFrame") self.horizontalLayout_13 = QtWidgets.QHBoxLayout(self.StegoPagesFrame) self.horizontalLayout_13.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_13.setSpacing(0) self.horizontalLayout_13.setObjectName("horizontalLayout_13") self.StegoPage_2 = QtWidgets.QStackedWidget(self.StegoPagesFrame) self.StegoPage_2.setObjectName("StegoPage_2") self.StegoOptions = QtWidgets.QWidget() self.StegoOptions.setObjectName("StegoOptions") self.horizontalLayout_14 = QtWidgets.QHBoxLayout(self.StegoOptions) self.horizontalLayout_14.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_14.setObjectName("horizontalLayout_14") self.StegoMainButtomFrame = 
QtWidgets.QFrame(self.StegoOptions) self.StegoMainButtomFrame.setFrameShape(QtWidgets.QFrame.StyledPanel) self.StegoMainButtomFrame.setFrameShadow(QtWidgets.QFrame.Raised) self.StegoMainButtomFrame.setObjectName("StegoMainButtomFrame") self.verticalLayout_21 = QtWidgets.QVBoxLayout(self.StegoMainButtomFrame) self.verticalLayout_21.setContentsMargins(0, 0, 0, 0) self.verticalLayout_21.setObjectName("verticalLayout_21") self.HideButton = QtWidgets.QPushButton(self.StegoMainButtomFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.HideButton.sizePolicy().hasHeightForWidth()) self.HideButton.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setFamily("Feast of Flesh BB") font.setPointSize(65) self.HideButton.setFont(font) self.HideButton.setStyleSheet("QPushButton {\n" " color: rgba(255, 255, 255, 150);\n" " background-color: rgba(0, 0, 0, 0);\n" " border: 0px solid;\n" "}\n" "\n" "QPushButton::hover {\n" " \n" " color: rgba(255, 255, 255, 195);\n" " border: 0px solid;\n" " background-color: qlineargradient(spread:pad, x1:0.46305, y1:0.261, x2:0.478, y2:1, stop:0.268657 rgba(62, 59, 59, 246), stop:1 rgba(211, 113, 27, 125));\n" "}") self.HideButton.setObjectName("HideButton") self.verticalLayout_21.addWidget(self.HideButton) self.RecoverButton = QtWidgets.QPushButton(self.StegoMainButtomFrame) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.RecoverButton.sizePolicy().hasHeightForWidth()) self.RecoverButton.setSizePolicy(sizePolicy) font = QtGui.QFont() font.setFamily("Feast of Flesh BB") font.setPointSize(49) self.RecoverButton.setFont(font) self.RecoverButton.setStyleSheet("QPushButton {\n" " color: rgba(255, 255, 255, 150);\n" " background-color: rgba(0, 0, 0, 0);\n" " border: 0px solid;\n" "}\n" "\n" "QPushButton::hover {\n" " \n" " color: rgba(255, 255, 255, 195);\n" " border: 0px solid;\n" " background-color: qlineargradient(spread:pad, x1:0.46305, y1:0.261, x2:0.478, y2:1, stop:0.268657 rgba(62, 59, 59, 246), stop:1 rgba(211, 113, 27, 125));\n" "}") self.RecoverButton.setObjectName("RecoverButton") self.verticalLayout_21.addWidget(self.RecoverButton) self.horizontalLayout_14.addWidget(self.StegoMainButtomFrame) self.StegoPage_2.addWidget(self.StegoOptions) self.Hide = QtWidgets.QWidget() self.Hide.setObjectName("Hide") self.verticalLayout_22 = QtWidgets.QVBoxLayout(self.Hide) self.verticalLayout_22.setContentsMargins(2, 0, 2, 0) self.verticalLayout_22.setSpacing(4) self.verticalLayout_22.setObjectName("verticalLayout_22") self.Infile = QtWidgets.QFrame(self.Hide) self.Infile.setMaximumSize(QtCore.QSize(16777215, 90)) self.Infile.setFrameShape(QtWidgets.QFrame.NoFrame) self.Infile.setFrameShadow(QtWidgets.QFrame.Raised) self.Infile.setObjectName("Infile") self.horizontalLayout_15 = QtWidgets.QHBoxLayout(self.Infile) self.horizontalLayout_15.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_15.setSpacing(0) self.horizontalLayout_15.setObjectName("horizontalLayout_15") self.InfileDragDrop = DragDropWidget(self.Infile) self.InfileDragDrop.setAcceptDrops(True) self.InfileDragDrop.setObjectName("InfileDragDrop") self.horizontalLayout_16 = QtWidgets.QHBoxLayout(self.InfileDragDrop) self.horizontalLayout_16.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_16.setSpacing(0) 
self.horizontalLayout_16.setObjectName("horizontalLayout_16") self.InfileDragDropLabel = QtWidgets.QLabel(self.InfileDragDrop) font = QtGui.QFont() font.setFamily("Feast of Flesh BB") font.setPointSize(22) font.setItalic(True) self.InfileDragDropLabel.setFont(font) self.InfileDragDropLabel.setStyleSheet("border: 2px dashed #aaa;\n" "color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(158, 7, 23, 180), stop:1 rgba(255, 130, 20, 200));\n" "background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(74, 74, 74, 42), stop:0.567164 rgba(78, 59, 58, 30));\n" "") self.InfileDragDropLabel.setAlignment(QtCore.Qt.AlignCenter) self.InfileDragDropLabel.setObjectName("InfileDragDropLabel") self.horizontalLayout_16.addWidget(self.InfileDragDropLabel) self.horizontalLayout_15.addWidget(self.InfileDragDrop) self.verticalLayout_22.addWidget(self.Infile) self.Datafile = QtWidgets.QFrame(self.Hide) self.Datafile.setMaximumSize(QtCore.QSize(16777215, 87)) self.Datafile.setFrameShape(QtWidgets.QFrame.StyledPanel) self.Datafile.setFrameShadow(QtWidgets.QFrame.Raised) self.Datafile.setObjectName("Datafile") self.DatafileDragDrop = DragDropWidget(self.Datafile) self.DatafileDragDrop.setGeometry(QtCore.QRect(0, 0, 996, 87)) self.DatafileDragDrop.setAcceptDrops(True) self.DatafileDragDrop.setObjectName("DatafileDragDrop") self.horizontalLayout_17 = QtWidgets.QHBoxLayout(self.DatafileDragDrop) self.horizontalLayout_17.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_17.setSpacing(0) self.horizontalLayout_17.setObjectName("horizontalLayout_17") self.DatafileDragDropLabel = QtWidgets.QLabel(self.DatafileDragDrop) self.DatafileDragDropLabel.setMinimumSize(QtCore.QSize(0, 0)) self.DatafileDragDropLabel.setMaximumSize(QtCore.QSize(16777215, 84)) font = QtGui.QFont() font.setFamily("Feast of Flesh BB") font.setPointSize(22) font.setItalic(True) self.DatafileDragDropLabel.setFont(font) self.DatafileDragDropLabel.setStyleSheet("border: 2px dashed #aaa;\n" "color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(158, 7, 23, 180), stop:1 rgba(255, 130, 20, 200));\n" "background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(74, 74, 74, 42), stop:0.567164 rgba(78, 59, 58, 30));\n" "") self.DatafileDragDropLabel.setAlignment(QtCore.Qt.AlignCenter) self.DatafileDragDropLabel.setObjectName("DatafileDragDropLabel") self.horizontalLayout_17.addWidget(self.DatafileDragDropLabel) self.verticalLayout_22.addWidget(self.Datafile) self.BrowseSaveLocationStego = QtWidgets.QFrame(self.Hide) self.BrowseSaveLocationStego.setMaximumSize(QtCore.QSize(16777215, 50)) self.BrowseSaveLocationStego.setFrameShape(QtWidgets.QFrame.StyledPanel) self.BrowseSaveLocationStego.setFrameShadow(QtWidgets.QFrame.Raised) self.BrowseSaveLocationStego.setObjectName("BrowseSaveLocationStego") self.SaveLocBrowseStego = DragDropWidget(self.BrowseSaveLocationStego) self.SaveLocBrowseStego.setGeometry(QtCore.QRect(0, 0, 996, 50)) self.SaveLocBrowseStego.setMaximumSize(QtCore.QSize(16777215, 50)) self.SaveLocBrowseStego.setAcceptDrops(True) self.SaveLocBrowseStego.setObjectName("SaveLocBrowseStego") self.horizontalLayout_18 = QtWidgets.QHBoxLayout(self.SaveLocBrowseStego) self.horizontalLayout_18.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_18.setSpacing(0) self.horizontalLayout_18.setObjectName("horizontalLayout_18") self.SaveLocBrowseStegoLabel = QtWidgets.QLabel(self.SaveLocBrowseStego) 
self.SaveLocBrowseStegoLabel.setMaximumSize(QtCore.QSize(16777215, 50)) font = QtGui.QFont() font.setFamily("Feast of Flesh BB") font.setPointSize(22) font.setItalic(True) self.SaveLocBrowseStegoLabel.setFont(font) self.SaveLocBrowseStegoLabel.setStyleSheet("border: 2px dashed #aaa;\n" "color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:0 rgba(158, 7, 23, 180), stop:1 rgba(255, 130, 20, 200));\n" "background-color: qlineargradient(spread:pad, x1:0.462049, y1:0.921, x2:0.452, y2:0, stop:0.174129 rgba(74, 74, 74, 42), stop:0.567164 rgba(78, 59, 58, 30));\n" "") self.SaveLocBrowseStegoLabel.setAlignment(QtCore.Qt.AlignCenter) self.SaveLocBrowseStegoLabel.setObjectName("SaveLocBrowseStegoLabel") self.horizontalLayout_18.addWidget(self.SaveLocBrowseStegoLabel) self.verticalLayout_22.addWidget(self.BrowseSaveLocationStego) self.ButtonsStego = QtWidgets.QFrame(self.Hide) self.ButtonsStego.setFrameShape(QtWidgets.QFrame.NoFrame) self.ButtonsStego.setFrameShadow(QtWidgets.QFrame.Raised) self.ButtonsStego.setObjectName("ButtonsStego") self.verticalLayout_23 = QtWidgets.QVBoxLayout(self.ButtonsStego) self.verticalLayout_23.setContentsMargins(0, 0, 0, 0) self.verticalLayout_23.setSpacing(0) self.verticalLayout_23.setObjectName("verticalLayout_23") self.LSBFrame = QtWidgets.QFrame(self.ButtonsStego) self.LSBFrame.setMinimumSize(QtCore.QSize(0, 0)) self.LSBFrame.setMaximumSize(QtCore.QSize(16777215, 35)) self.LSBFrame.setFrameShape(QtWidgets.QFrame.NoFrame) self.LSBFrame.setFrameShadow(QtWidgets.QFrame.Raised) self.LSBFrame.setObjectName("LSBFrame") self.horizontalLayout_19 = QtWidgets.QHBoxLayout(self.LSBFrame) self.horizontalLayout_19.setContentsMargins(0, 0, 0, 0) self.horizontalLayout_19.setSpacing(0) self.horizontalLayout_19.setObjectName("horizontalLayout_19") self.lsb = QtWidgets.QLineEdit(self.LSBFrame) self.lsb.setMinimumSize(QtCore.QSize(0, 35)) self.lsb.setMaximumSize(QtCore.QSize(16777215, 35)) font = QtGui.QFont() font.setFamily("Courgette") font.setPointSize(11) font.setBold(True) font.setItalic(True) font.setWeight(75) self.lsb.setFont(font) self.lsb.setStyleSheet("color: rgb(255, 76, 16);\n" "background-color: qlineargradient(spread:reflect, x1:1, y1:0.579, x2:0, y2:0.477273, stop:0.268657 rgba(62, 59, 59, 246), stop:1 rgba(211, 113, 27, 55));") self.lsb.setAlignment(QtCore.Qt.AlignCenter)
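# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file above): pyuic-generated
# UI classes like this one are normally composed into application code, where
# the widgets get wired to handlers. The class name Ui_MainWindow and the
# handler names below are assumptions for illustration; only the widget names
# (Encrypt, Decrypt, AlgoSelect, ...) come from the code above.
#
# class MainWindow(QtWidgets.QMainWindow):
#     def __init__(self):
#         super().__init__()
#         self.ui = Ui_MainWindow()            # assumed generated class name
#         self.ui.setupUi(self)
#         self.ui.AlgoSelect.addItems(["AES", "DES"])       # hypothetical items
#         self.ui.Encrypt.clicked.connect(self.on_encrypt)  # hypothetical slot
#         self.ui.Decrypt.clicked.connect(self.on_decrypt)  # hypothetical slot
# ---------------------------------------------------------------------------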
# Repository: Murabei-OpenSource-Codes/pumpwood-djangoviews
"""Create views using Pumpwood pattern."""
import os
import pandas as pd
import simplejson as json
from io import BytesIO
from django.conf import settings
from django.http import HttpResponse
from rest_framework.parsers import JSONParser
from rest_framework import viewsets, status
from rest_framework.response import Response
from werkzeug.utils import secure_filename
from pumpwood_communication import exceptions
from pumpwood_communication.serializers import PumpWoodJSONEncoder
from django.db.models.fields import NOT_PROVIDED
from pumpwood_djangoviews.renderer import PumpwoodJSONRenderer
from pumpwood_djangoviews.query import filter_by_dict
from pumpwood_djangoviews.action import load_action_parameters
from pumpwood_djangoviews.aux.map_django_types import django_map
from django.db.models.fields.files import FieldFile


def save_serializer_instance(serializer_instance):
    is_valid = serializer_instance.is_valid()
    if is_valid:
        return serializer_instance.save()
    else:
        raise exceptions.PumpWoodException(serializer_instance.errors)


class PumpWoodRestService(viewsets.ViewSet):
    """Basic View-Set for pumpwood rest end-points."""

    _view_type = "simple"
    renderer_classes = [PumpwoodJSONRenderer]

    #####################
    # Route information #
    endpoint_description = None
    dimentions = {}
    icon = None
    #####################

    service_model = None
    storage_object = None
    microservice = None
    trigger = False

    # List fields
    serializer = None
    list_fields = None
    foreign_keys = {}
    file_fields = {}

    # Front-end uses 50 as the limit to check if all data have been fetched;
    # if you change this parameter, be sure to update the front-end list
    # component as well.
    list_paginate_limit = 50

    @staticmethod
    def _allowed_extension(filename, allowed_extensions):
        extension = 'none'
        if '.' in filename:
            extension = filename.rsplit('.', 1)[1].lower()
        if "*" not in allowed_extensions:
            if extension not in allowed_extensions:
                return [(
                    "File {filename} with extension {extension} not " +
                    "allowed.\n Allowed extensions: {allowed_extensions}"
                ).format(filename=filename, extension=extension,
                         allowed_extensions=str(allowed_extensions))]
        return []

    def list(self, request):
        """
        View function to list objects with pagination.

        The number of returned objects is limited by
        settings.REST_FRAMEWORK['PAGINATE_BY']. To get the next page, use
        exclude_dict['pk__in': [list of the received pks]] to exclude the
        objects already fetched. Filtering is applied through the
        .query.filter_by_dict function.

        :param request.data['filter_dict']: Dictionary passed as
            objects.filter(**filter_dict)
        :type request.data['filter_dict']: dict
        :param request.data['exclude_dict']: Dictionary passed as
            objects.exclude(**exclude_dict)
        :type request.data['exclude_dict']: dict
        :param request.data['order_by']: List passed as
            objects.order_by(*order_by)
        :type request.data['order_by']: list
        :return: A list of objects using list_serializer
        """
        try:
            request_data = request.data
            limit = request_data.pop("limit", None)
            list_paginate_limit = limit or self.list_paginate_limit
            fields = request_data.pop("fields", None)
            list_fields = fields or self.list_fields
            arg_dict = {'query_set': self.service_model.objects.all()}
            arg_dict.update(request_data)
            query_set = filter_by_dict(**arg_dict)[:list_paginate_limit]
            return Response(self.serializer(
                query_set, many=True, fields=list_fields).data)
        except TypeError as e:
            raise exceptions.PumpWoodQueryException(message=str(e))

    def list_without_pag(self, request):
        """List data without pagination.

        View function to list objects. Basically the same as `list`, but
        without the limit imposed by settings.REST_FRAMEWORK['PAGINATE_BY'].

        :param request.data['filter_dict']: Dictionary passed as
            objects.filter(**filter_dict)
        :type request.data['filter_dict']: dict
        :param request.data['exclude_dict']: Dictionary passed as
            objects.exclude(**exclude_dict)
        :type request.data['exclude_dict']: dict
        :param request.data['order_by']: List passed as
            objects.order_by(*order_by)
        :type request.data['order_by']: list
        :return: A list of objects using list_serializer

        .. note::
            Be careful with the number of objects that will be retrieved.
        """
        try:
            request_data = request.data
            fields = request_data.pop("fields", None)
            list_fields = fields or self.list_fields
            arg_dict = {'query_set': self.service_model.objects.all()}
            arg_dict.update(request_data)
            query_set = filter_by_dict(**arg_dict)
            return Response(self.serializer(
                query_set, many=True, fields=list_fields).data)
        except TypeError as e:
            raise exceptions.PumpWoodQueryException(
                message=str(e))

    def retrieve(self, request, pk=None):
        """
        Retrieve view; uses self.serializer to return the object with pk.

        :param int pk: Object pk to be retrieved
        :return: The representation of the object serialized by
            self.serializer
        :rtype: dict
        """
        obj = self.service_model.objects.get(pk=pk)
        return Response(self.serializer(obj, many=False).data)

    def retrieve_file(self, request, pk: int):
        """
        Read a file field and return its contents without streaming.

        Args:
            pk (int): Pk of the object that owns the file field.
            file-field (str, query parameter): Name of the file field to
                read.

        Returns:
            An HttpResponse serving the file bytes as an attachment.
        """
        if self.storage_object is None:
            raise exceptions.PumpWoodForbidden(
                "storage_object not set")
        file_field = request.query_params.get('file-field', None)
        if file_field not in self.file_fields.keys():
            msg = (
                "'{file_field}' must be set on file_fields "
                "dictionary.").format(file_field=file_field)
            raise exceptions.PumpWoodForbidden(msg)
        obj = self.service_model.objects.get(id=pk)
        file_path = getattr(obj, file_field)
        if isinstance(file_path, FieldFile):
            file_path = file_path.name
        if file_path is None:
            raise exceptions.PumpWoodObjectDoesNotExist(
                "field [{}] not found at object".format(file_field))
        file_data = self.storage_object.read_file(file_path)
        file_name = os.path.basename(file_path)
        response = HttpResponse(content=BytesIO(file_data["data"]))
        response['Content-Type'] = file_data["content_type"]
        response['Content-Disposition'] = \
            'attachment; filename=%s' % file_name
        return response

    def delete(self, request, pk=None):
        """
        Delete view.

        :param int pk: Object pk to be deleted
        """
        obj = self.service_model.objects.get(pk=pk)
        return_data = self.serializer(obj, many=False).data
        obj.delete()
        return Response(return_data, status=200)

    def delete_many(self, request):
        """
        Delete many objects using filter.

        :param request.data['filter_dict']: Dictionary passed as
            objects.filter(**filter_dict)
        :type request.data['filter_dict']: dict
        :param request.data['exclude_dict']: Dictionary passed as
            objects.exclude(**exclude_dict)
        :type request.data['exclude_dict']: dict
        :return: True if delete is ok
        """
        try:
            arg_dict = {'query_set': self.service_model.objects.all()}
            arg_dict.update(request.data)
            query_set = filter_by_dict(**arg_dict)
            query_set.delete()
            return Response(True, status=200)
        except TypeError as e:
            raise exceptions.PumpWoodQueryException(
                message=str(e))

    def remove_file_field(self, request, pk: int) -> bool:
        """
        Remove file field.

        Args:
            pk (int): pk of the object.

        Kwargs:
            No kwargs for this function.

        Raises:
            PumpWoodForbidden:
                If file_field is not in the file_fields keys of the view.
            PumpWoodException:
                Propagates exceptions from storage_objects.
        """
        file_field = request.query_params.get('file_field', None)
        if file_field not in self.file_fields.keys():
            raise exceptions.PumpWoodForbidden(
                "file_field must be set on self.file_fields dictionary.")
        obj = self.service_model.objects.get(id=pk)
        file = getattr(obj, file_field)
        if file is None:
            raise exceptions.PumpWoodObjectDoesNotExist(
                "field [{}] not found at object".format(file_field))
        else:
            file_path = file.name
            setattr(obj, file_field, None)
            obj.save()
            try:
                self.storage_object.delete_file(file_path)
                return Response(True)
            except Exception as e:
                raise exceptions.PumpWoodException(str(e))

    def save(self, request):
        """
        Save and update object according to request.data.

        Object will be updated if request.data['pk'] is not None.

        :param dict request.data: Object representation as
            self.serializer
        :raise PumpWoodException:
            'Object model class different from {service_model} :
            {service_model}' if request.data['service_model'] is not the
            same as self.service_model.__name__
        """
        request_data: dict = None
        if "application/json" in request.content_type.lower():
            request_data = request.data
        else:
            request_data = request.data.dict()
            for k in request_data.keys():
                if k not in self.file_fields.keys():
                    request_data[k] = json.loads(request_data[k])
        data_pk = request_data.get('pk')
        saved_obj = None
        for field in self.file_fields.keys():
            request_data.pop(field, None)

        # update
        if data_pk:
            data_to_update = self.service_model.objects.get(pk=data_pk)
            serializer = self.serializer(
                data_to_update, data=request_data,
                context={'request': request})
            saved_obj = save_serializer_instance(serializer)
            response_status = status.HTTP_200_OK
        # save
        else:
            serializer = self.serializer(
                data=request_data, context={'request': request})
            saved_obj = save_serializer_instance(serializer)
            response_status = status.HTTP_201_CREATED

        # Uploading files
        object_errors = {}
        for field in self.file_fields.keys():
            field_errors = []
            if field in request.FILES:
                file = request.FILES[field]
                file_name = secure_filename(file.name)
                field_errors.extend(self._allowed_extension(
                    filename=file_name,
                    allowed_extensions=self.file_fields[field]))
                filename = "{}___{}".format(saved_obj.id, file_name)
                if len(field_errors) != 0:
                    object_errors[field] = field_errors
                else:
                    model_class = self.service_model.__name__.lower()
                    file_path = '{model_class}__{field}/'.format(
                        model_class=model_class, field=field)
                    storage_filepath = self.storage_object.write_file(
                        file_path=file_path, file_name=filename,
                        data=file.read(),
                        content_type=file.content_type,
                        if_exists='overide')
                    setattr(saved_obj, field, storage_filepath)
        if object_errors != {}:
            message = "error when saving object: " \
                if data_pk is None else "error when updating object: "
            payload = object_errors
            message_to_append = []
            for key, value in object_errors.items():
                message_to_append.append(key + ", " + str(value))
            message = message + "; ".join(message_to_append)
            raise exceptions.PumpWoodObjectSavingException(
                message=message, payload=payload)
        saved_obj.save()

        if self.microservice is not None and self.trigger:
            # Process ETLTrigger for the model class
            self.microservice.login()
            if data_pk is None:
                self.microservice.execute_action(
                    "ETLTrigger", action="process_triggers", parameters={
                        "model_class": self.service_model.__name__.lower(),
                        "type": "create",
                        "pk": None,
                        "action_name": None})
            else:
                self.microservice.execute_action(
                    "ETLTrigger", action="process_triggers", parameters={
                        "model_class": self.service_model.__name__.lower(),
                        "type": "update",
                        "pk": saved_obj.pk,
                        "action_name": None})

        # Overhead: serializing and deserializing the object.
        return Response(
            self.serializer(saved_obj).data, status=response_status)

    def get_actions(self):
        """Get all actions with action decorator."""
        # Local import; only used by this method.
        import inspect
        function_dict = dict(inspect.getmembers(
            self.service_model, predicate=inspect.isfunction))
        method_dict = dict(inspect.getmembers(
            self.service_model, predicate=inspect.ismethod))
        method_dict.update(function_dict)
        actions = {
            name: func for name, func in method_dict.items()
            if getattr(func, 'is_action', False)}
        return actions

    def list_actions(self, request):
        """List model exposed actions."""
        actions = self.get_actions()
        action_descriptions = [
            action.action_object.to_dict()
            for name, action in actions.items()]
        return Response(action_descriptions)

    def list_actions_with_objects(self, request):
        """List model exposed actions according to selected objects."""
        actions = self.get_actions()
        action_descriptions = [
            action.action_object.description
            for name, action in actions.items()]
        return Response(action_descriptions)

    def execute_action(self, request, action_name, pk=None):
        """Execute action over object or class using parameters."""
        parameters = request.data
        actions = self.get_actions()
        rest_action_names = list(actions.keys())
        if action_name not in rest_action_names:
            message = (
                "There is no method {action} in rest actions "
                "for {class_name}").format(
                    action=action_name,
                    class_name=self.service_model.__name__)
            raise exceptions.PumpWoodForbidden(
                message=message, payload={"action_name": action_name})

        action = getattr(self.service_model, action_name)
        if pk is None and not action.action_object.is_static_function:
            msg_template = (
                "Action [{action}] at model [{class_name}] is not "
                "a classmethod and no pk was provided.")
            message = msg_template.format(
                action=action_name,
                class_name=self.service_model.__name__)
            raise exceptions.PumpWoodActionArgsException(
                message=message, payload={"action_name": action_name})
        if pk is not None and action.action_object.is_static_function:
            msg_template = (
                "Action [{action}] at model [{class_name}] is a "
                "classmethod and a pk was provided.")
            message = msg_template.format(
                action=action_name,
                class_name=self.service_model.__name__)
            raise exceptions.PumpWoodActionArgsException(
                message=message, payload={"action_name": action_name})

        object_dict = None
        action = None
        if pk is not None:
            model_object = self.service_model.objects.filter(pk=pk).first()
            if model_object is None:
                message_template = (
                    "Requested object {service_model}[{pk}] not found.")
                temp_service_model = \
                    self.service_model.__name__
                message = message_template.format(
                    service_model=temp_service_model, pk=pk)
                raise exceptions.PumpWoodObjectDoesNotExist(
                    message=message, payload={
                        "service_model": temp_service_model, "pk": pk})
            action
    def init_inputs(self):
        for name in self._inputs:
            if name not in self._consts:
                self.add_placeholder_op(name)

    def inference_shapes(self):
        _LOG.info("Inference shapes")
        self.init_input_shape()
        for node in self._nodes:
            node.inference_shape(self._batch, self._shapes,
                                 self._nodes_by_name)

    def fuse_dynamic_lstm(self, node, lstm_input, nodes_before, nodes_after):
        _LOG.info("Fuse LstmNonlinear.")
        out_ifdef = nodes_before[0]
        offset_out = out_ifdef.read_attribute('offset')
        prev_out_name = out_ifdef.inputs[0]
        first_affine = nodes_before[2]
        cell_ifdef = nodes_before[3]
        offset_cell = cell_ifdef.read_attribute('offset')
        prev_cell_name = cell_ifdef.inputs[0]
        second_affine = nodes_after[2]
        scale_node = nodes_after[-3]
        scale = scale_node.read_attribute('scale')
        inputs = list()
        inputs.append(lstm_input)
        inputs.append(out_ifdef.inputs[0])
        inputs.append(cell_ifdef.inputs[0])
        inputs.append(first_affine.inputs[1])
        inputs.append(node.inputs[1])
        inputs.append(second_affine.inputs[1])
        prev_cell = nodes_after[-2]
        prev_out = nodes_after[-1]
        prev_cell_dim = prev_cell.read_attribute('dim')
        prev_out_dim = prev_out.read_attribute('dim')
        out_dimrange = nodes_after[3]
        prev_out_offset = out_dimrange.read_attribute('offset')
        lstm_attrs = dict()
        lstm_attrs['prev_out_delay'] = offset_out
        lstm_attrs['prev_cell_delay'] = offset_cell
        lstm_attrs['scale'] = scale
        lstm_attrs['prev_out_dim'] = prev_out_dim
        lstm_attrs['prev_cell_dim'] = prev_cell_dim
        lstm_attrs['prev_out_offset'] = prev_out_offset
        lstm_attrs['chunk_size'] = self._chunk_size
        if len(first_affine.inputs) == 3:
            inputs.append(first_affine.inputs[2])
            lstm_attrs['bias_a'] = 1
        if len(second_affine.inputs) == 3:
            inputs.append(second_affine.inputs[2])
            lstm_attrs['bias_b'] = 1
        node_name = node.name + '.fused'
        consts = {}
        for input in inputs:
            if input in self._consts:
                consts[input] = self._consts[input]
        outputs = list()
        outputs.append(node_name)
        outputs.append(prev_out_name.replace(".IfDefined", ""))
        outputs.append(prev_cell_name.replace(".IfDefined", ""))
        prev_cell_shape = [self._batch, abs(offset_cell), prev_cell_dim]
        prev_out_shape = [self._batch, abs(offset_out), prev_out_dim]
        self._shapes[inputs[1]] = prev_out_shape
        self._shapes[inputs[2]] = prev_cell_shape
        self._shapes[outputs[1]] = prev_out_shape
        self._shapes[outputs[2]] = prev_cell_shape
        dynamic_lstm_node = make_node(node_name,
                                      KaldiOpType.DynamicLSTM.name,
                                      inputs, outputs, lstm_attrs, consts)
        return dynamic_lstm_node

    def remove_nodes(self, nodes_to_remove):
        self._nodes_by_name.clear()
        new_nodes = [node for node in self._nodes
                     if node not in nodes_to_remove]
        self._nodes = new_nodes

    def add_new_nodes(self, new_nodes):
        for node in new_nodes:
            # Compare against existing node *names*; the original check
            # compared a name against a list of node objects, which is
            # always true.
            kaldi_check(node.name not in [n.name for n in self._nodes],
                        "Node(%s) is already in graph."
% node.name) self._nodes.append(node) def replace_inputs_outputs(self, inputs_to_replace, outputs_to_replace): for node in self._nodes: for i in range(len(node.inputs)): if node.inputs[i] in inputs_to_replace: node.inputs[i] = inputs_to_replace[node.inputs[i]] for i in range(len(node.nexts)): if node.nexts[i] in outputs_to_replace: node.nexts[i] = outputs_to_replace[node.nexts[i]] for i in range(len(self._outputs)): if self._outputs[i] in inputs_to_replace: self._outputs[i] = inputs_to_replace[self._outputs[i]] def update_with_fused_nodes(self, fused_nodes, nodes_to_remove, outputs_to_replace, inputs_to_replace): self.remove_nodes(nodes_to_remove) self.add_new_nodes(fused_nodes) self.replace_inputs_outputs(inputs_to_replace, outputs_to_replace) self.update_nodes_by_name() self.reorder_nodes(False) def update_nodes_by_name(self): self._nodes_by_name.clear() for node in self._nodes: self._nodes_by_name[node.name] = node def check_before_lstm(self, lstm_node): ifdef_inputs = [] input = lstm_node.inputs[0] if input not in self._nodes_by_name: return False, None, None, None append_a = self._nodes_by_name[input] if append_a.type != KaldiOpType.Append.name or \ len(append_a.inputs) != 2: return False, None, None, None sup_affine_name = append_a.inputs[0] sup_ifdef_a_name = append_a.inputs[1] if sup_affine_name not in self._nodes_by_name or \ sup_ifdef_a_name not in self._nodes_by_name: return False, None, None, None affine = self._nodes_by_name[sup_affine_name] ifdef_a = self._nodes_by_name[sup_ifdef_a_name] if affine.type != KaldiOpType.Gemm.name or \ ifdef_a.type != KaldiOpType.IfDefined.name: return False, None, None, None ifdef_inputs.append(ifdef_a.inputs[0]) if affine.inputs[0] not in self._nodes_by_name: return False, None, None, None append_b = self._nodes_by_name[affine.inputs[0]] if append_b.type != KaldiOpType.Append.name or \ len(append_b.inputs) != 2: return False, None, None, None input_name = append_b.inputs[0] ifdef_b_name = append_b.inputs[1] if ifdef_b_name in self._nodes_by_name: ifdef_b = self._nodes_by_name[ifdef_b_name] if ifdef_b.type == KaldiOpType.IfDefined.name: ifdef_inputs.append(ifdef_b.inputs[0]) nodes_before = [ifdef_b, append_b, affine, ifdef_a, append_a] return (True, input_name, ifdef_inputs, nodes_before) return False, None, None, None def check_after_lstm(self, lstm_node, ifdef_inputs): nodes_after = [] if len(lstm_node.nexts) != 2: return False, None, None slice_a_name = lstm_node.nexts[0] slice_b_name = lstm_node.nexts[1] if slice_a_name not in self._nodes_by_name or \ slice_b_name not in self._nodes_by_name: return False, None, None slice_a = self._nodes_by_name[slice_a_name] slice_b = self._nodes_by_name[slice_b_name] if slice_a.type != KaldiOpType.DimRange.name or \ slice_b.type != KaldiOpType.DimRange.name: return False, None, None if slice_a.nexts[0] not in self._nodes_by_name or \ slice_b.nexts[0] not in self._nodes_by_name: return False, None, None left_node = self._nodes_by_name[slice_a.nexts[0]] right_node = self._nodes_by_name[slice_b.nexts[0]] check_left_right = (left_node.type == KaldiOpType.Gemm.name and right_node.type == KaldiOpType.Append.name) or \ (left_node.type == KaldiOpType.Append.name and right_node.type == KaldiOpType.Gemm.name) if check_left_right is False: return False, None, None if left_node.type == KaldiOpType.Gemm.name and \ right_node.type == KaldiOpType.Append.name: append_node = right_node affine_node = left_node nodes_after.append(slice_a) nodes_after.append(slice_b) else: append_node = left_node affine_node = right_node 
nodes_after.append(slice_b) nodes_after.append(slice_a) nodes_after.append(affine_node) if len(append_node.inputs) != 2: return False, None, None if slice_b_name == append_node.inputs[1]: dim_range_b_name = append_node.inputs[0] else: dim_range_b_name = append_node.inputs[1] if dim_range_b_name not in affine_node.nexts or \ append_node.nexts[0] not in self._nodes_by_name: return False, None, None nodes_after.append(self._nodes_by_name[dim_range_b_name]) nodes_after.append(append_node) scale_node = self._nodes_by_name[append_node.nexts[0]] if scale_node.type != KaldiOpType.Scale.name or \ len(scale_node.nexts) != 2: return False, None, None nodes_after.append(scale_node) if scale_node.nexts[0] not in self._nodes_by_name or \ scale_node.nexts[1] not in self._nodes_by_name: return False, None, None last_dim_range_0 = self._nodes_by_name[scale_node.nexts[0]] last_dim_range_1 = self._nodes_by_name[scale_node.nexts[1]] if last_dim_range_0.type != KaldiOpType.DimRange.name or \ last_dim_range_1.type != KaldiOpType.DimRange.name: return False, None, None if (last_dim_range_0.name in ifdef_inputs or last_dim_range_0.name + '.IfDefined' in ifdef_inputs) and \ (last_dim_range_1.name in ifdef_inputs or last_dim_range_1.name + '.IfDefined' in ifdef_inputs): if last_dim_range_0.name == ifdef_inputs[0] or \ last_dim_range_0.name + '.IfDefined' == ifdef_inputs[0]: nodes_after.append(last_dim_range_0) nodes_after.append(last_dim_range_1) else: nodes_after.append(last_dim_range_1) nodes_after.append(last_dim_range_0) return True, affine_node.name, nodes_after return False, None, None def check_extraction_pooling(self, node): if len(node.inputs) != 1: _LOG.info(node.name, "Inputs > 1 ") return None input_name = node.inputs[0] if input_name not in self._nodes_by_name: _LOG.info(input_name, "not in nodes by name ") return None extraction_node = self._nodes_by_name[input_name] if extraction_node.type != \ KaldiOpType.StatisticsExtraction.name: _LOG.info(input_name, "is not StatisticsExtraction.") return None if len(node.nexts) == 1: return [extraction_node, node] else: _LOG.info("nexts > 1.") return None def check_fuse_extraction_pooling(self, node): extract_pooling_pack = self.check_extraction_pooling(node) if extract_pooling_pack is not None: _LOG.info("Fuse StatisticsExtraction/StatisticsPooling " "to ExtractPooling.") extraction_node = extract_pooling_pack[0] pooling_node = extract_pooling_pack[1] extract_input_dim = extraction_node.read_attribute('input_dim') extract_input_period = extraction_node.read_attribute( 'input_period') extract_output_period = extraction_node.read_attribute( 'output_period') include_variance = extraction_node.read_attribute( 'include_variance') num_log_count = pooling_node.read_attribute( 'num_log_count_features') left_context = pooling_node.read_attribute('left_context') right_context = pooling_node.read_attribute('right_context') variance_floor = pooling_node.read_attribute('variance_floor') output_stddevs = pooling_node.read_attribute('output_stddevs') pooling_input_period = pooling_node.read_attribute('input_period') kaldi_check(pooling_input_period == extract_output_period, "StatisticsExtraction's output period should" " be equal to StatisticsPooling's input period.") extract_pooling_attrs = { 'input_period': extract_input_period, 'output_period': extract_output_period, 'include_variance': include_variance, 'input_dim': extract_input_dim, 'left_context': left_context, 'right_context': right_context, 'num_log_count': num_log_count, 'variance_floor': variance_floor, 
                'output_stddevs': output_stddevs,
            }
            node_name = extraction_node.name + '.fused'
            inputs = extraction_node.inputs
            extract_pooling_node = make_node(node_name,
                                             KaldiOpType.ExtractPooling.name,
                                             inputs,
                                             [node_name],
                                             extract_pooling_attrs)
            return extract_pooling_node, extract_pooling_pack
        return None, None

    def fuse_nodes(self):
        fused_nodes = []
        nodes_to_remove = list()
        outputs_to_replace = dict()
        inputs_to_replace = dict()
        for node in self._nodes:
            if node.type == KaldiOpType.StatisticsPooling.name and \
                    self._fuse_stats:
                extract_pooling_node, extract_pooling_pack = \
                    self.check_fuse_extraction_pooling(node)
                if extract_pooling_node is not None:
                    fused_nodes.append(extract_pooling_node)
                    extraction_node = extract_pooling_pack[0]
                    pooling_node = extract_pooling_pack[1]
                    outputs_to_replace[extraction_node.name] = \
                        extract_pooling_node.name
                    inputs_to_replace[pooling_node.name] = \
                        extract_pooling_node.name
                    nodes_to_remove.extend(extract_pooling_pack)
            elif node.type == KaldiOpType.LstmNonlinear.name and \
                    self._fuse_lstm:
                check_before, lstm_input, ifdef_inputs, nodes_before = \
                    self.check_before_lstm(node)
                if check_before:
                    check_after, lstm_output, nodes_after = \
                        self.check_after_lstm(node, ifdef_inputs)
                    if check_after:
                        dynamic_lstm_node = \
                            self.fuse_dynamic_lstm(node, lstm_input,
                                                   nodes_before, nodes_after)
                        outputs_to_replace[lstm_input] = \
                            dynamic_lstm_node.name
                        inputs_to_replace[lstm_output] = \
                            dynamic_lstm_node.name
                        nodes_to_remove.extend(nodes_before)
                        nodes_to_remove.append(node)
                        nodes_to_remove.extend(nodes_after)
                        fused_nodes.append(dynamic_lstm_node)
        if len(fused_nodes) > 0:
            self.update_with_fused_nodes(fused_nodes, nodes_to_remove,
                                         outputs_to_replace,
                                         inputs_to_replace)

    def precompute(self):
        _LOG.info("Precompute")
        for node in self._nodes:
            node.precompute()

    def convert_initializers(self):
        for const_name in self._consts:
            const = self._consts[const_name]
            tensor = self.kaldi_to_onnx_tensor(const, const_name)
            self._initializers[tensor.name] = tensor

    @staticmethod
    def make_name(name):
        """Make op name for inserted ops."""
        global INTERNAL_NAME
        INTERNAL_NAME += 1
        return "{}__{}".format(name, INTERNAL_NAME)

    def make_onnx_shape(self, shape):
        # Not a @staticmethod: it calls self.make_name to label
        # unknown (-1) dimensions.
        return [self.make_name("unk") if i == -1 else i for i in shape]

    @staticmethod
    def find_opset(opset):
        if opset is None or opset == 0:
            opset = defs.onnx_opset_version()
            if opset > PREFERRED_OPSET:
                opset = PREFERRED_OPSET
        return opset

    @staticmethod
    def kaldi_to_onnx_tensor(tensor, name=""):
        onnx_tensor = numpy_helper.from_array(tensor, name=name)
        return onnx_tensor

    def make_model(self):
        _LOG.info("start making ONNX model.")
        # add placeholders
        self.init_inputs()
        output_tensor_values = []
        for name in self._outputs:
            v = helper.make_tensor_value_info(
                name,
                onnx_pb.TensorProto.FLOAT,
                self.make_onnx_shape(self._shapes[name]))
            output_tensor_values.append(v)
        onnx_nodes = []
        for node in self._nodes:
            if node.type not in ['Input', 'Output']:
                try:
                    input_names = node.inputs
                    output_names = node.outputs
                    onnx_node = helper.make_node(node.type,
                                                 input_names,
                                                 output_names,
                                                 name=node.name,
                                                 domain=self._operatorsetid,
                                                 **node.attrs)
                    onnx_nodes.append(onnx_node)
                except Exception as ex:
                    node.info()
                    raise Exception('convert failed for node: {0} err: {1}'
                                    .format(node.type, ex))
        self.convert_initializers()
        all_inputs = []
        for node in self._nodes:
            all_inputs.extend(node.inputs)
        initializers = [i for i in list(self._initializers.values())
                        if i.name in all_inputs]
        input_with_initializers = []
        initializers_names = []
        for initializer in initializers:
            val = helper.make_tensor_value_info(
                initializer.name,
                initializer.data_type,
                self.make_onnx_shape(initializer.dims))
            input_with_initializers.append(val)
            initializers_names.append(initializer.name)
        input_with_initializers.extend(
            list(self._model_input_tensors.values()))
        # Internal tensors: everything that is neither an initializer nor a
        # model input ('and' here; the original 'or' let almost every tensor
        # through).
        input_tensors_names = [i for i in all_inputs
                               if i not in initializers_names
                               and i not in self._inputs]
        internal_inputs = []
        for name in input_tensors_names:
            val = helper.make_tensor_value_info(
                name,
                onnx_pb.TensorProto.FLOAT,
                self.make_onnx_shape(self._shapes[name]))
            internal_inputs.append(val)
        graph = helper.make_graph(onnx_nodes,
                                  self._producer_name,
                                  input_with_initializers,
                                  output_tensor_values,
                                  initializer=initializers,
                                  value_info=internal_inputs)
        metadata_props = {"left_context": str(self._left_context),
                          "right_context": str(self._right_context),
                          "chunk_size": str(self._chunk_size),
                          "modulus": str(self._modulus),
                          "subsample_factor": str(self._subsample_factor)}
        kwargs = {"producer_name": self._producer_name,
                  "producer_version": self._producer_version}
        opsets = []
        imp = helper.make_operatorsetid(self._operatorsetid, 1)
        imp.version = self._opset
        opsets.append(imp)
        if self._extra_opset is not None:
            opsets.extend(self._extra_opset)
        kwargs["opset_imports"] = opsets
        model_proto = helper.make_model(graph, **kwargs)
        helper.set_model_props(model_proto, metadata_props)
        checker.check_model(model_proto)
        return model_proto

    @property
    def opset(self):
        return self._opset

    @property
    def initializers(self):
        return self._initializers

    def set_shape(self, name, val):
        if isinstance(val, np.ndarray):
            val = val.tolist()
        self._shapes[name] = val

    def get_shape(self, name):
        assert isinstance(name, six.text_type)
        shape = self._shapes.get(name)
        if shape:
            for
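# ---------------------------------------------------------------------------
# Illustrative, standalone sketch of the rename bookkeeping used by
# fuse_nodes/replace_inputs_outputs above: after a fusion, every edge that
# referenced a removed node is redirected to the fused node's name. _StubNode
# is a stand-in for this module's node class, used only for this demo.
class _StubNode:
    def __init__(self, inputs, nexts):
        self.inputs = inputs
        self.nexts = nexts


def _demo_rewire():
    node = _StubNode(inputs=["pooling.old"], nexts=["extraction.old"])
    inputs_to_replace = {"pooling.old": "extraction.fused"}
    outputs_to_replace = {"extraction.old": "extraction.fused"}
    # Same loop shape as replace_inputs_outputs above.
    for i in range(len(node.inputs)):
        if node.inputs[i] in inputs_to_replace:
            node.inputs[i] = inputs_to_replace[node.inputs[i]]
    for i in range(len(node.nexts)):
        if node.nexts[i] in outputs_to_replace:
            node.nexts[i] = outputs_to_replace[node.nexts[i]]
    # Both edges now point at the fused node.
    assert node.inputs == ["extraction.fused"]
    assert node.nexts == ["extraction.fused"]
# ---------------------------------------------------------------------------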
= bi_connector __props__.__dict__["bi_connector_config"] = bi_connector_config __props__.__dict__["cloud_backup"] = cloud_backup __props__.__dict__["cluster_type"] = cluster_type __props__.__dict__["disk_size_gb"] = disk_size_gb __props__.__dict__["encryption_at_rest_provider"] = encryption_at_rest_provider __props__.__dict__["labels"] = labels __props__.__dict__["mongo_db_major_version"] = mongo_db_major_version __props__.__dict__["name"] = name __props__.__dict__["num_shards"] = num_shards __props__.__dict__["pit_enabled"] = pit_enabled if project_id is None and not opts.urn: raise TypeError("Missing required property 'project_id'") __props__.__dict__["project_id"] = project_id __props__.__dict__["provider_auto_scaling_compute_max_instance_size"] = provider_auto_scaling_compute_max_instance_size __props__.__dict__["provider_auto_scaling_compute_min_instance_size"] = provider_auto_scaling_compute_min_instance_size if provider_backup_enabled is not None and not opts.urn: warnings.warn("""This field is deprecated,please use cloud_backup instead""", DeprecationWarning) pulumi.log.warn("""provider_backup_enabled is deprecated: This field is deprecated,please use cloud_backup instead""") __props__.__dict__["provider_backup_enabled"] = provider_backup_enabled __props__.__dict__["provider_disk_iops"] = provider_disk_iops __props__.__dict__["provider_disk_type_name"] = provider_disk_type_name if provider_encrypt_ebs_volume is not None and not opts.urn: warnings.warn("""All EBS volumes are encrypted by default, the option to disable encryption has been removed""", DeprecationWarning) pulumi.log.warn("""provider_encrypt_ebs_volume is deprecated: All EBS volumes are encrypted by default, the option to disable encryption has been removed""") __props__.__dict__["provider_encrypt_ebs_volume"] = provider_encrypt_ebs_volume if provider_instance_size_name is None and not opts.urn: raise TypeError("Missing required property 'provider_instance_size_name'") __props__.__dict__["provider_instance_size_name"] = provider_instance_size_name if provider_name is None and not opts.urn: raise TypeError("Missing required property 'provider_name'") __props__.__dict__["provider_name"] = provider_name __props__.__dict__["provider_region_name"] = provider_region_name __props__.__dict__["provider_volume_type"] = provider_volume_type __props__.__dict__["replication_factor"] = replication_factor __props__.__dict__["replication_specs"] = replication_specs __props__.__dict__["cluster_id"] = None __props__.__dict__["connection_strings"] = None __props__.__dict__["container_id"] = None __props__.__dict__["mongo_db_version"] = None __props__.__dict__["mongo_uri"] = None __props__.__dict__["mongo_uri_updated"] = None __props__.__dict__["mongo_uri_with_options"] = None __props__.__dict__["paused"] = None __props__.__dict__["provider_encrypt_ebs_volume_flag"] = None __props__.__dict__["snapshot_backup_policies"] = None __props__.__dict__["srv_address"] = None __props__.__dict__["state_name"] = None super(Cluster, __self__).__init__( 'mongodbatlas:index/cluster:Cluster', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, advanced_configuration: Optional[pulumi.Input[pulumi.InputType['ClusterAdvancedConfigurationArgs']]] = None, auto_scaling_compute_enabled: Optional[pulumi.Input[bool]] = None, auto_scaling_compute_scale_down_enabled: Optional[pulumi.Input[bool]] = None, auto_scaling_disk_gb_enabled: Optional[pulumi.Input[bool]] = None, 
backing_provider_name: Optional[pulumi.Input[str]] = None, backup_enabled: Optional[pulumi.Input[bool]] = None, bi_connector: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, bi_connector_config: Optional[pulumi.Input[pulumi.InputType['ClusterBiConnectorConfigArgs']]] = None, cloud_backup: Optional[pulumi.Input[bool]] = None, cluster_id: Optional[pulumi.Input[str]] = None, cluster_type: Optional[pulumi.Input[str]] = None, connection_strings: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterConnectionStringArgs']]]]] = None, container_id: Optional[pulumi.Input[str]] = None, disk_size_gb: Optional[pulumi.Input[float]] = None, encryption_at_rest_provider: Optional[pulumi.Input[str]] = None, labels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterLabelArgs']]]]] = None, mongo_db_major_version: Optional[pulumi.Input[str]] = None, mongo_db_version: Optional[pulumi.Input[str]] = None, mongo_uri: Optional[pulumi.Input[str]] = None, mongo_uri_updated: Optional[pulumi.Input[str]] = None, mongo_uri_with_options: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, num_shards: Optional[pulumi.Input[int]] = None, paused: Optional[pulumi.Input[bool]] = None, pit_enabled: Optional[pulumi.Input[bool]] = None, project_id: Optional[pulumi.Input[str]] = None, provider_auto_scaling_compute_max_instance_size: Optional[pulumi.Input[str]] = None, provider_auto_scaling_compute_min_instance_size: Optional[pulumi.Input[str]] = None, provider_backup_enabled: Optional[pulumi.Input[bool]] = None, provider_disk_iops: Optional[pulumi.Input[int]] = None, provider_disk_type_name: Optional[pulumi.Input[str]] = None, provider_encrypt_ebs_volume: Optional[pulumi.Input[bool]] = None, provider_encrypt_ebs_volume_flag: Optional[pulumi.Input[bool]] = None, provider_instance_size_name: Optional[pulumi.Input[str]] = None, provider_name: Optional[pulumi.Input[str]] = None, provider_region_name: Optional[pulumi.Input[str]] = None, provider_volume_type: Optional[pulumi.Input[str]] = None, replication_factor: Optional[pulumi.Input[int]] = None, replication_specs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterReplicationSpecArgs']]]]] = None, snapshot_backup_policies: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterSnapshotBackupPolicyArgs']]]]] = None, srv_address: Optional[pulumi.Input[str]] = None, state_name: Optional[pulumi.Input[str]] = None) -> 'Cluster': """ Get an existing Cluster resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[bool] auto_scaling_compute_enabled: Specifies whether cluster tier auto-scaling is enabled. The default is false. - Set to `true` to enable cluster tier auto-scaling. If enabled, you must specify a value for `providerSettings.autoScaling.compute.maxInstanceSize`. - Set to `false` to disable cluster tier auto-scaling. :param pulumi.Input[bool] auto_scaling_compute_scale_down_enabled: Set to `true` to enable the cluster tier to scale down. This option is only available if `autoScaling.compute.enabled` is `true`. 
- If this option is enabled, you must specify a value for `providerSettings.autoScaling.compute.minInstanceSize` :param pulumi.Input[bool] auto_scaling_disk_gb_enabled: Specifies whether disk auto-scaling is enabled. The default is true. - Set to `true` to enable disk auto-scaling. - Set to `false` to disable disk auto-scaling. :param pulumi.Input[str] backing_provider_name: Cloud service provider on which the server for a multi-tenant cluster is provisioned. :param pulumi.Input[bool] backup_enabled: Clusters running MongoDB FCV 4.2 or later and any new Atlas clusters of any type do not support this parameter :param pulumi.Input[Mapping[str, pulumi.Input[str]]] bi_connector: Specifies BI Connector for Atlas configuration on this cluster. BI Connector for Atlas is only available for M10+ clusters. See BI Connector below for more details. **DEPRECATED** Use `bi_connector_config` instead. :param pulumi.Input[pulumi.InputType['ClusterBiConnectorConfigArgs']] bi_connector_config: Specifies BI Connector for Atlas configuration on this cluster. BI Connector for Atlas is only available for M10+ clusters. See BI Connector below for more details. :param pulumi.Input[bool] cloud_backup: Flag indicating if the cluster uses Cloud Backup for backups. :param pulumi.Input[str] cluster_id: The cluster ID. :param pulumi.Input[str] cluster_type: Specifies the type of the cluster that you want to modify. You cannot convert a sharded cluster deployment to a replica set deployment. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClusterConnectionStringArgs']]]] connection_strings: Set of connection strings that your applications use to connect to this cluster. More info in [Connection-strings](https://docs.mongodb.com/manual/reference/connection-string/). Use the parameters in this object to connect your applications to this cluster. To learn more about the formats of connection strings, see [Connection String Options](https://docs.atlas.mongodb.com/reference/faq/connection-changes/). NOTE: Atlas returns the contents of this object after the cluster is operational, not while it builds the cluster. :param pulumi.Input[str] container_id: The Network Peering Container ID. The id of the container either created programmatically by the user before any clusters existed in the project or when the first cluster in the region (AWS/Azure) or project (GCP) was created. :param pulumi.Input[float] disk_size_gb: Capacity, in gigabytes, of the host’s root volume. Increase this number to add capacity, up to a maximum possible value of 4096 (i.e., 4 TB). This value must be a positive integer. * The minimum disk size for dedicated clusters is 10GB for AWS and GCP. If you specify diskSizeGB with a lower disk size, Atlas defaults to the minimum disk size value. * Note: The maximum value for disk storage cannot exceed 50 times the maximum RAM for the selected cluster. If you require additional storage space beyond this limitation, consider upgrading your cluster to a higher tier. * Cannot be used with clusters with local NVMe SSDs * Cannot be used with Azure clusters :param pulumi.Input[str] encryption_at_rest_provider: Possible values are AWS, GCP, AZURE or NONE. Only needed if you desire to manage the keys, see [Encryption at Rest using Customer Key Management](https://docs.atlas.mongodb.com/security-aws-kms/) for complete documentation. You must configure encryption at rest for the Atlas project before enabling it on any cluster in the project. 
For complete documentation on configuring Encryption at Rest, see Encryption at Rest using Customer Key Management. Requires M10 or greater. and for legacy backups, backup_enabled, to be false or omitted. **Note: Atlas encrypts all cluster storage and snapshot volumes, securing all cluster data on disk: a concept known as encryption at rest, by default**. :param pulumi.Input[str] mongo_db_major_version: Version of the cluster to deploy. Atlas supports the following MongoDB versions for M10+ clusters: `3.6`, `4.0`, or `4.2`. You must set this value to `4.2` if `provider_instance_size_name` is either M2 or M5. :param pulumi.Input[str] mongo_db_version: Version of MongoDB the cluster runs, in `major-version`.`minor-version` format. :param pulumi.Input[str] mongo_uri: Base connection string for the cluster. Atlas only displays this field after the cluster is operational, not while it builds the cluster. :param pulumi.Input[str] mongo_uri_updated: Lists when the connection string was last updated. The connection string changes, for example, if you change a replica set to a sharded cluster. :param pulumi.Input[str] mongo_uri_with_options: connection string for connecting to the Atlas cluster. Includes the replicaSet, ssl, and authSource query parameters in the connection string with values appropriate for the cluster. :param pulumi.Input[str] name: Name of the cluster as it appears in Atlas. Once the cluster is created, its name cannot be changed. :param pulumi.Input[int] num_shards: Number of shards to deploy in the specified zone, minimum 1. :param pulumi.Input[bool] paused: Flag that indicates whether the cluster is paused or not. :param pulumi.Input[bool] pit_enabled: - Flag that indicates if the cluster uses Continuous Cloud Backup. If set to true, cloud_backup must also be set to true. :param pulumi.Input[str] project_id: The unique ID for the project to create the database user. :param pulumi.Input[str] provider_auto_scaling_compute_max_instance_size: Maximum instance size to which your cluster can automatically scale (e.g., M40). Required if `autoScaling.compute.enabled` is `true`. :param pulumi.Input[str] provider_auto_scaling_compute_min_instance_size: Minimum instance size to which your cluster can automatically scale (e.g.,
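# ---------------------------------------------------------------------------
# Hedged usage sketch: declaring this resource from a Pulumi program. Every
# argument name below appears in the Cluster signature documented above; the
# concrete values (project id, tier, region) are placeholders, not defaults.
#
# import pulumi_mongodbatlas as mongodbatlas
#
# cluster = mongodbatlas.Cluster(
#     "example-cluster",
#     project_id="<PROJECT_ID>",            # placeholder
#     provider_name="AWS",
#     provider_instance_size_name="M10",
#     provider_region_name="US_EAST_1",
#     cluster_type="REPLICASET",
#     cloud_backup=True,    # preferred over deprecated provider_backup_enabled
#     mongo_db_major_version="4.2",
# )
# ---------------------------------------------------------------------------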
# Repo: mwawrzos/logging -- File: log_parsers/parse_mlperf.py # Copyright 2018 The MLPerf Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import os import re import csv import sys import argparse # Third-party modules import dash import dash_core_components as dcc import dash_html_components as html import plotly.express as pex import plotly.graph_objects as pgo import dateutil.parser import pandas import numpy from dash.dependencies import Input, Output, State, ALL from datetime import datetime, timedelta from collections import defaultdict # Global Variables -- User Modifiable # g_power_window* : how much time before (BEGIN) and after (END) loadgen timestamps to show data in graph g_power_window_before_add_td = timedelta(seconds=0) g_power_window_before_sub_td = timedelta(seconds=30) g_power_window_after_add_td = timedelta(seconds=30) g_power_window_after_sub_td = timedelta(seconds=0) # Global Variables -- Do not modify g_power_add_td = timedelta(seconds=0) g_power_sub_td = timedelta(seconds=0) g_loadgen_data = defaultdict(dict) g_graph_data = defaultdict(dict) g_figures = defaultdict(dict) g_verbose = False app = dash.Dash(__name__) # Check command-line parameters and call respective functions def main(): m_args = f_parseParameters() if( m_args.loadgen_in != "" ): f_parse_Loadgen( m_args.loadgen_in, m_args.loadgen_out, m_args.workload ) if( m_args.specpower_in != "" ): f_parse_SPECPowerlog( m_args.specpower_in, m_args.powerlog_out ) if( m_args.stats is not None and m_args.loadgen_out != "" and m_args.powerlog_out != "" ): f_stats( m_args.loadgen_out, m_args.powerlog_out, m_args.stats, m_args.csv ) if( m_args.graph is not None and m_args.loadgen_out != "" and m_args.powerlog_out != "" ): f_graph( m_args.loadgen_out, m_args.powerlog_out, m_args.graph ) def f_stats( p_loadgen_csv, p_power_csv, p_filter, p_stats_csv ): m_loadgen_data = pandas.DataFrame() m_power_data = pandas.DataFrame() if( p_stats_csv ): m_stats_frame = pandas.DataFrame( columns=['Run', 'Workload', 'Scenario', 'Mode', 'Begin Time', 'End Time', 'Runtime', 'Samples', 'Data', 'Minimum', 'Maximum', 'Average', 'Std.Dev'] ) # Open loadgen data try: if( g_verbose ) : print( f"stats: opening {p_loadgen_csv} for reading" ) m_loadgen_data = pandas.read_csv( p_loadgen_csv ) except: print( f"stats: error opening file: {p_loadgen_csv}" ) exit(1) # Open power data try: if( g_verbose ) : print( f"stats: opening {p_power_csv} for reading" ) m_power_data = pandas.read_csv( p_power_csv ) except: print( f"stats: error opening file: {p_power_csv}" ) exit(1) # Combine Date/Time and drop Time m_power_data['Date'] = m_power_data['Date'] + " " + m_power_data['Time'] m_power_data.rename( columns = {'Date' : 'Datetime'}, inplace = True ) m_power_data['Datetime'] = pandas.to_datetime( m_power_data['Datetime'] ) m_power_data = m_power_data.drop( columns=['Time'] ) m_power_data.set_index( 'Datetime' ) m_dataset_count = 0 if( g_verbose ) : print( "stats: loading and parsing data, please wait" )
for index, m_loadgen_entry in m_loadgen_data.iterrows(): m_power_ts_begin = dateutil.parser.parse( m_loadgen_entry['System Begin Date'] + " " + m_loadgen_entry['System Begin Time'] ) m_power_ts_end = dateutil.parser.parse( m_loadgen_entry['System End Date'] + " " + m_loadgen_entry['System End Time'] ) m_mask_stats = (m_power_data['Datetime'] >= (m_power_ts_begin + g_power_add_td - g_power_sub_td )) & \ (m_power_data['Datetime'] <= (m_power_ts_end + g_power_add_td - g_power_sub_td )) m_dataframe = m_power_data.loc[m_mask_stats].copy() if( m_dataframe.empty ): continue else: m_dataset_count += 1 if( p_stats_csv ) : m_stats_list = [] for m_header in list(m_dataframe) : if( p_filter and not re.findall(r"("+'|'.join(p_filter)+r")", m_header) ): continue m_data = m_dataframe[m_header] if( m_data.dtypes not in [numpy.int64, numpy.float64] ): continue m_stats_list.append( { 'Run' : m_dataset_count, 'Workload' : m_loadgen_entry['Workload'], 'Scenario' : m_loadgen_entry['Scenario'], 'Mode' : m_loadgen_entry['Mode'], 'Begin Time' : f"{m_power_ts_begin}", 'End Time' : f"{m_power_ts_end}", 'Runtime' : f"{m_power_ts_end - m_power_ts_begin}", 'Metric' : m_loadgen_entry['Metric'], 'Score' : m_loadgen_entry['Score'], 'Samples' : m_dataframe.shape[0], 'Data' : m_header, 'Minimum' : f"{m_data.min():.3f}", 'Maximum' : f"{m_data.max():.3f}", 'Average' : f"{m_data.mean():.3f}", 'Std.Dev' : f"{m_data.std():.3f}" } ) if( re.search( "watts?|power", m_header, re.I ) ): m_stats_list[-1].update( {'Energy' : f"{ float(m_stats_list[-1]['Average']) * (dateutil.parser.parse(m_stats_list[-1]['End Time']) - dateutil.parser.parse(m_stats_list[-1]['Begin Time'])).total_seconds():.3f}"} ) m_stats_frame = m_stats_frame.append( pandas.DataFrame( m_stats_list ) ) else: print( f"Run: {m_dataset_count}\n" + f"Workload: {m_loadgen_entry['Workload']}\n" + f"Scenario: {m_loadgen_entry['Scenario']}\n" + f"Mode: {m_loadgen_entry['Mode']}\n" + f"Begin Time: {m_loadgen_entry['System Begin Date']} {m_loadgen_entry['System Begin Time']}\n" + f"End Time: {m_loadgen_entry['System End Date']} {m_loadgen_entry['System End Time']}\n" + f"Runtime: {(m_power_ts_end - m_power_ts_begin)}\n" + f"Metric: {m_loadgen_entry['Metric']}\n" + f"Score: {m_loadgen_entry['Score']}\n" + f"Samples: {m_dataframe.shape[0]}\n" ) for m_header in list(m_dataframe) : if( p_filter and not re.findall(r"("+'|'.join(p_filter)+r")", m_header) ): continue m_data = m_dataframe[m_header] if( m_data.dtypes not in [numpy.int64, numpy.float64] ): #if( g_verbose ) : print( f"stats: {m_header} dtype is {m_data.dtypes}" ) continue print( f"Data: {m_header}\n" + f"Minimum: {m_data.min():.3f}\n" + f"Maximum: {m_data.max():.3f}\n" + f"Average: {m_data.mean():.3f}\n" + f"Std.Dev: {m_data.std():.3f}\n" ) if( re.search( r"\bwatts?\b|\bpower\b", m_header, re.I ) ): print( f"Energy: {(m_data.mean() * (m_power_ts_end - m_power_ts_begin).total_seconds()):.3f}\n" ) if( g_verbose ) : print( f"stats: {m_dataset_count} entries parsed" ) if( not m_dataset_count ): print( "*** ERROR: no data collated!"
) print( " check loadgen and data timestamps for timing mismatches and/or use --deskew [seconds] to realign" ) exit(1) if( p_stats_csv ): try: if( g_verbose ) : print( f"stats: saving stats to {p_stats_csv}\n" ) m_stats_frame.to_csv( p_stats_csv, index=False ) except: print( f"stats: error while creating csv output file: {p_stats_csv}" ) exit(1) #### Graph data over time #### Parses the loadgen data for BEGIN and END times #### Parses the power data (or any CSV data with a header) and tries to plot over time def f_graph( p_loadgen_csv, p_power_csv, p_filter ): m_loadgen_data = pandas.DataFrame() m_graph_data = pandas.DataFrame() # Open loadgen data try: if( g_verbose ) : print( f"graph: opening {p_loadgen_csv} for reading" ) m_loadgen_data = pandas.read_csv( p_loadgen_csv ) except: print( f"graph: error opening file: {p_loadgen_csv}" ) exit(1) # Open power/raw data try: if( g_verbose ) : print( f"graph: opening {p_power_csv} for reading" ) m_graph_data = pandas.read_csv( p_power_csv ) except: print( f"graph: error opening file: {p_power_csv}", ) exit(1) # Combine Date/Time and drop Time m_graph_data['Date'] = m_graph_data['Date'] + " " + m_graph_data['Time'] m_graph_data.rename( columns = {'Date' : 'Datetime'}, inplace = True ) m_graph_data['Datetime'] = pandas.to_datetime( m_graph_data['Datetime'] ) m_graph_data = m_graph_data.drop( columns=['Time'] ) m_graph_data.set_index( 'Datetime' ) m_dataset_count = 0 if( g_verbose ) : print( "graph: Loading and parsing data, please wait" ) for index, m_loadgen_entry in m_loadgen_data.iterrows(): m_power_ts_begin = dateutil.parser.parse( m_loadgen_entry['System Begin Date'] + " " + m_loadgen_entry['System Begin Time'] ) m_power_ts_end = dateutil.parser.parse( m_loadgen_entry['System End Date'] + " " + m_loadgen_entry['System End Time'] ) m_mask_stats = (m_graph_data['Datetime'] >= (m_power_ts_begin + g_power_add_td - g_power_sub_td )) & \ (m_graph_data['Datetime'] <= (m_power_ts_end + g_power_add_td - g_power_sub_td )) m_mask_graph = (m_graph_data['Datetime'] >= (m_power_ts_begin + g_power_add_td - g_power_sub_td + g_power_window_before_add_td - g_power_window_before_sub_td )) & \ (m_graph_data['Datetime'] <= (m_power_ts_end + g_power_add_td - g_power_sub_td + g_power_window_after_add_td - g_power_window_after_sub_td )) m_dataframe = m_graph_data.loc[m_mask_graph].copy() if( m_dataframe.empty or m_graph_data.loc[m_mask_stats].empty ): continue for m_header in m_graph_data.columns[1:] : if( not re.findall(r"("+'|'.join(p_filter)+r")", m_header) ): continue if( not g_figures[m_header] ): g_figures[m_header] = pgo.Figure() g_figures[m_header].update_layout( title={'text' : f'{m_header} vs. 
Time', 'x' : 0.5, 'y' : 0.95, 'xanchor': 'center', 'yanchor': 'top' }, xaxis_title="Time (offset between powerlog & loadgen timestamps)", xaxis_tickformat='%H:%M:%S.%L', yaxis_title=f"{m_header}" ) # Zero the timescale to difference between loadgen and data timestamps # Zero'ing causes datetime to be a timedelta, add an "arbitrary" date to convert back into datetime m_dataframe.loc[:,'Datetime'] -= m_dataframe['Datetime'].iloc[0] m_dataframe.loc[:,'Datetime'] += datetime( 2011, 1, 13 ) g_figures[m_header].add_trace( pgo.Scatter( x=m_dataframe['Datetime'], y=m_dataframe[m_header], mode="lines+markers", line=dict(color=pex.colors.qualitative.Plotly[m_dataset_count%len(pex.colors.qualitative.Plotly)]), marker=dict(color=pex.colors.qualitative.Plotly[m_dataset_count%len(pex.colors.qualitative.Plotly)]), name=f"run {m_dataset_count}, {m_loadgen_entry['Workload']}, {m_loadgen_entry['Scenario']}, {m_loadgen_entry['Mode']}", visible=True ) ) # Draw the loadgen runtime below the graph g_figures[m_header].add_vrect( x0=m_graph_data.loc[m_mask_stats]['Datetime'].iloc[0] - m_graph_data.loc[m_mask_graph]['Datetime'].iloc[0] + datetime( 2011, 1, 13 ), x1=m_graph_data.loc[m_mask_stats]['Datetime'].iloc[-1] - m_graph_data.loc[m_mask_graph]['Datetime'].iloc[0] + datetime( 2011, 1, 13 ), # y1=m_graph_data.loc[m_mask_stats][m_header].max(), fillcolor=pex.colors.qualitative.Plotly[m_dataset_count%len(pex.colors.qualitative.Plotly)], opacity=0.20, layer="below", line_width=0, # annotation_text=f"loadgen range for run {m_dataset_count}", #, {m_loadgen_entry['Workload']}, {m_loadgen_entry['Scenario']}, {m_loadgen_entry['Mode']}", # annotation_position="bottom left", visible=True ) g_graph_data[m_dataset_count] = m_graph_data.loc[m_mask_stats] g_loadgen_data[m_dataset_count] = m_loadgen_entry m_dataset_count += 1 if( g_verbose ) : print( "graph: data parsing complete. building components" ) # Build list of graphs, dropdown options m_dcc_graphs = [] m_dcc_dropdown = [] m_counter = 0 for m_key in g_figures : m_dcc_graphs.append( dcc.Graph( id={ 'type' : 'graph-obj', 'data' : f'{m_key}', 'index': m_counter }, figure=g_figures[m_key], style ={'height':'70vh', 'display':'none'} ) ) m_dcc_dropdown.append( {'label':f'{m_key}', 'value':m_counter } ) m_counter += 1 if( not m_counter ): print( "*** ERROR: No data collated!" ) print( " Check loadgen and data timestamp for timing mismatches and/or use --deskew [seconds] to realign" ) exit(1) app.layout = html.Div([ html.Div( id="div-filter-options", children=[ html.Div( ["Filter Dataset by Keywords (i.e. 'resnet ssd-large'): ", dcc.Input(id='input-box-filter-by-keywords', type='text') ] ), html.Div( ["Filter Dataset by Run IDs (i.e. '1, 2,
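Both `f_stats` and `f_graph` reduce to the same core move: build a boolean mask that keeps only the power samples falling inside the (deskewed, optionally padded) loadgen window. A minimal sketch of that pattern, assuming a DataFrame with an already-parsed `Datetime` column; the helper name is mine:

```python
import pandas as pd
from datetime import timedelta

def window_mask(df, begin, end,
                deskew=timedelta(0),
                pad_before=timedelta(0),
                pad_after=timedelta(0)):
    # Mirrors m_mask_stats / m_mask_graph above: shift both window edges by
    # the deskew correction, then widen the window by the graphing pads.
    lo = begin + deskew - pad_before
    hi = end + deskew + pad_after
    return (df["Datetime"] >= lo) & (df["Datetime"] <= hi)

# Usage: df.loc[window_mask(df, ts_begin, ts_end, deskew=timedelta(seconds=2))]
```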
become 0-dim arrays >>> price = close = np.random.uniform(1, 10, size=target_shape) >>> size_type = np.asarray(SizeType.TargetPercent) >>> direction = np.asarray(Direction.LongOnly) >>> fees = np.asarray(0.001) >>> fixed_fees = np.asarray(1.) >>> slippage = np.asarray(0.001) >>> order_records, log_records = flex_simulate_nb( ... target_shape, ... group_lens, ... init_cash, ... cash_sharing, ... segment_mask=segment_mask, ... pre_sim_func_nb=pre_sim_func_nb, ... post_sim_func_nb=post_sim_func_nb, ... pre_group_func_nb=pre_group_func_nb, ... post_group_func_nb=post_group_func_nb, ... pre_segment_func_nb=pre_segment_func_nb, ... pre_segment_args=(size, price, size_type, direction), ... post_segment_func_nb=post_segment_func_nb, ... flex_order_func_nb=flex_order_func_nb, ... flex_order_args=(size, price, size_type, direction, fees, fixed_fees, slippage), ... post_order_func_nb=post_order_func_nb ... ) before simulation before group 0 before segment 0 creating order 0 at column 0 order status: 0 creating order 1 at column 1 order status: 0 creating order 2 at column 2 order status: 0 breaking out of the loop after segment 0 before segment 2 creating order 0 at column 1 order status: 0 creating order 1 at column 2 order status: 0 creating order 2 at column 0 order status: 0 breaking out of the loop after segment 2 before segment 4 creating order 0 at column 0 order status: 0 creating order 1 at column 2 order status: 0 creating order 2 at column 1 order status: 0 breaking out of the loop after segment 4 after group 0 after simulation ``` """ check_group_lens_nb(group_lens, target_shape[1]) check_group_init_cash_nb(group_lens, target_shape[1], init_cash, cash_sharing) order_records, log_records = init_records_nb(target_shape, max_orders, max_logs) init_cash = init_cash.astype(np.float_) last_cash = init_cash.copy() last_position = np.full(target_shape[1], 0., dtype=np.float_) last_debt = np.full(target_shape[1], 0., dtype=np.float_) last_free_cash = init_cash.copy() last_val_price = np.full(target_shape[1], np.nan, dtype=np.float_) last_value = init_cash.copy() second_last_value = init_cash.copy() temp_value = init_cash.copy() last_return = np.full_like(last_value, np.nan) last_pos_record = np.empty(target_shape[1], dtype=trade_dt) last_pos_record['id'][:] = -1 last_oidx = np.full(target_shape[1], -1, dtype=np.int_) last_lidx = np.full(target_shape[1], -1, dtype=np.int_) oidx = 0 lidx = 0 # Call function before the simulation pre_sim_ctx = SimulationContext( target_shape=target_shape, group_lens=group_lens, init_cash=init_cash, cash_sharing=cash_sharing, call_seq=None, segment_mask=segment_mask, call_pre_segment=call_pre_segment, call_post_segment=call_post_segment, close=close, ffill_val_price=ffill_val_price, update_value=update_value, fill_pos_record=fill_pos_record, flex_2d=flex_2d, order_records=order_records, log_records=log_records, last_cash=last_cash, last_position=last_position, last_debt=last_debt, last_free_cash=last_free_cash, last_val_price=last_val_price, last_value=last_value, second_last_value=second_last_value, last_return=last_return, last_oidx=last_oidx, last_lidx=last_lidx, last_pos_record=last_pos_record ) pre_sim_out = pre_sim_func_nb(pre_sim_ctx, *pre_sim_args) from_col = 0 for group in range(len(group_lens)): to_col = from_col + group_lens[group] group_len = to_col - from_col # Call function before the group pre_group_ctx = GroupContext( target_shape=target_shape, group_lens=group_lens, init_cash=init_cash, cash_sharing=cash_sharing, call_seq=None, 
segment_mask=segment_mask, call_pre_segment=call_pre_segment, call_post_segment=call_post_segment, close=close, ffill_val_price=ffill_val_price, update_value=update_value, fill_pos_record=fill_pos_record, flex_2d=flex_2d, order_records=order_records, log_records=log_records, last_cash=last_cash, last_position=last_position, last_debt=last_debt, last_free_cash=last_free_cash, last_val_price=last_val_price, last_value=last_value, second_last_value=second_last_value, last_return=last_return, last_oidx=last_oidx, last_lidx=last_lidx, last_pos_record=last_pos_record, group=group, group_len=group_len, from_col=from_col, to_col=to_col ) pre_group_out = pre_group_func_nb(pre_group_ctx, *pre_sim_out, *pre_group_args) for i in range(target_shape[0]): # Is this segment active? if call_pre_segment or segment_mask[i, group]: # Call function before the segment pre_seg_ctx = SegmentContext( target_shape=target_shape, group_lens=group_lens, init_cash=init_cash, cash_sharing=cash_sharing, call_seq=None, segment_mask=segment_mask, call_pre_segment=call_pre_segment, call_post_segment=call_post_segment, close=close, ffill_val_price=ffill_val_price, update_value=update_value, fill_pos_record=fill_pos_record, flex_2d=flex_2d, order_records=order_records, log_records=log_records, last_cash=last_cash, last_position=last_position, last_debt=last_debt, last_free_cash=last_free_cash, last_val_price=last_val_price, last_value=last_value, second_last_value=second_last_value, last_return=last_return, last_oidx=last_oidx, last_lidx=last_lidx, last_pos_record=last_pos_record, group=group, group_len=group_len, from_col=from_col, to_col=to_col, i=i, call_seq_now=None ) pre_segment_out = pre_segment_func_nb(pre_seg_ctx, *pre_group_out, *pre_segment_args) # Update open position stats if fill_pos_record: for col in range(from_col, to_col): update_open_pos_stats_nb( last_pos_record[col], last_position[col], last_val_price[col] ) # Update value and return if cash_sharing: last_value[group] = get_group_value_nb( from_col, to_col, last_cash[group], last_position, last_val_price ) last_return[group] = returns_nb.get_return_nb(second_last_value[group], last_value[group]) else: for col in range(from_col, to_col): if last_position[col] == 0: last_value[col] = last_cash[col] else: last_value[col] = last_cash[col] + last_position[col] * last_val_price[col] last_return[col] = returns_nb.get_return_nb(second_last_value[col], last_value[col]) # Is this segment active? 
if segment_mask[i, group]: call_idx = -1 while True: call_idx += 1 # Generate the next order flex_order_ctx = FlexOrderContext( target_shape=target_shape, group_lens=group_lens, init_cash=init_cash, cash_sharing=cash_sharing, call_seq=None, segment_mask=segment_mask, call_pre_segment=call_pre_segment, call_post_segment=call_post_segment, close=close, ffill_val_price=ffill_val_price, update_value=update_value, fill_pos_record=fill_pos_record, flex_2d=flex_2d, order_records=order_records, log_records=log_records, last_cash=last_cash, last_position=last_position, last_debt=last_debt, last_free_cash=last_free_cash, last_val_price=last_val_price, last_value=last_value, second_last_value=second_last_value, last_return=last_return, last_oidx=last_oidx, last_lidx=last_lidx, last_pos_record=last_pos_record, group=group, group_len=group_len, from_col=from_col, to_col=to_col, i=i, call_seq_now=None, call_idx=call_idx ) col, order = flex_order_func_nb(flex_order_ctx, *pre_segment_out, *flex_order_args) if col == -1: break if col < from_col or col >= to_col: raise ValueError("Column exceeds bounds of the group") # Get current values position_now = last_position[col] debt_now = last_debt[col] val_price_now = last_val_price[col] pos_record_now = last_pos_record[col] if cash_sharing: cash_now = last_cash[group] free_cash_now = last_free_cash[group] value_now = last_value[group] return_now = last_return[group] else: cash_now = last_cash[col] free_cash_now = last_free_cash[col] value_now = last_value[col] return_now = last_return[col] if np.isinf(order.price): if i > 0: _prev_close = flex_select_auto_nb(close, i - 1, col, flex_2d) else: _prev_close = np.nan _close = flex_select_auto_nb(close, i, col, flex_2d) order = replace_inf_price_nb(_prev_close, _close, order) # Process the order state = ProcessOrderState( cash=cash_now, position=position_now, debt=debt_now, free_cash=free_cash_now, val_price=val_price_now, value=value_now, oidx=oidx, lidx=lidx ) order_result, new_state = process_order_nb( i, col, group, state, update_value, order, order_records, log_records ) # Update state cash_now = new_state.cash position_now = new_state.position debt_now = new_state.debt free_cash_now = new_state.free_cash val_price_now = new_state.val_price value_now = new_state.value if cash_sharing: return_now = returns_nb.get_return_nb(second_last_value[group], value_now) else: return_now = returns_nb.get_return_nb(second_last_value[col], value_now) oidx = new_state.oidx lidx = new_state.lidx # Now becomes last last_position[col] = position_now last_debt[col] = debt_now if not np.isnan(val_price_now) or not ffill_val_price: last_val_price[col] = val_price_now if cash_sharing: last_cash[group] = cash_now last_free_cash[group] = free_cash_now last_value[group] = value_now last_return[group] = return_now else: last_cash[col] = cash_now last_free_cash[col] = free_cash_now last_value[col] = value_now last_return[col] = return_now if state.oidx != new_state.oidx: last_oidx[col] = state.oidx if state.lidx != new_state.lidx: last_lidx[col] = state.lidx # Update position record if fill_pos_record: update_pos_record_nb( pos_record_now, i, col, state.position, position_now, order_result ) # Post-order callback post_order_ctx = PostOrderContext( target_shape=target_shape, group_lens=group_lens, init_cash=init_cash, cash_sharing=cash_sharing, call_seq=None, segment_mask=segment_mask, call_pre_segment=call_pre_segment, call_post_segment=call_post_segment, close=close, ffill_val_price=ffill_val_price, update_value=update_value, 
fill_pos_record=fill_pos_record, flex_2d=flex_2d, order_records=order_records, log_records=log_records, last_cash=last_cash, last_position=last_position, last_debt=last_debt, last_free_cash=last_free_cash, last_val_price=last_val_price, last_value=last_value, second_last_value=second_last_value, last_return=last_return, last_oidx=last_oidx, last_lidx=last_lidx, last_pos_record=last_pos_record, group=group, group_len=group_len, from_col=from_col, to_col=to_col, i=i, call_seq_now=None, col=col, call_idx=call_idx, cash_before=state.cash, position_before=state.position, debt_before=state.debt, free_cash_before=state.free_cash, val_price_before=state.val_price, value_before=state.value, order_result=order_result, cash_now=cash_now, position_now=position_now, debt_now=debt_now, free_cash_now=free_cash_now, val_price_now=val_price_now, value_now=value_now, return_now=return_now, pos_record_now=pos_record_now ) post_order_func_nb(post_order_ctx, *pre_segment_out, *post_order_args) # NOTE: Regardless of segment_mask, we still need to update stats to be accessed by future rows # Update valuation price for col in range(from_col, to_col): _close = flex_select_auto_nb(close, i, col, flex_2d) if not np.isnan(_close) or not ffill_val_price: last_val_price[col] = _close # Update previous value, current value and return if cash_sharing: last_value[group] = get_group_value_nb( from_col, to_col, last_cash[group], last_position, last_val_price ) second_last_value[group] = temp_value[group] temp_value[group] = last_value[group] last_return[group] = returns_nb.get_return_nb(second_last_value[group], last_value[group]) else: for col in range(from_col, to_col): if last_position[col] == 0: last_value[col] = last_cash[col] else: last_value[col] = last_cash[col] + last_position[col] * last_val_price[col] second_last_value[col] = temp_value[col] temp_value[col] = last_value[col] last_return[col] = returns_nb.get_return_nb(second_last_value[col], last_value[col]) # Update open position stats if fill_pos_record: for col in range(from_col, to_col): update_open_pos_stats_nb( last_pos_record[col], last_position[col], last_val_price[col] ) # Is this segment active? 
if call_post_segment or segment_mask[i, group]: # Call function before the segment post_seg_ctx = SegmentContext( target_shape=target_shape, group_lens=group_lens, init_cash=init_cash, cash_sharing=cash_sharing, call_seq=None, segment_mask=segment_mask, call_pre_segment=call_pre_segment, call_post_segment=call_post_segment, close=close, ffill_val_price=ffill_val_price, update_value=update_value, fill_pos_record=fill_pos_record, flex_2d=flex_2d, order_records=order_records, log_records=log_records, last_cash=last_cash, last_position=last_position, last_debt=last_debt, last_free_cash=last_free_cash, last_val_price=last_val_price, last_value=last_value, second_last_value=second_last_value, last_return=last_return, last_oidx=last_oidx, last_lidx=last_lidx, last_pos_record=last_pos_record, group=group, group_len=group_len, from_col=from_col, to_col=to_col, i=i, call_seq_now=None ) post_segment_func_nb(post_seg_ctx, *pre_group_out, *post_segment_args) # Call function after the group post_group_ctx = GroupContext( target_shape=target_shape, group_lens=group_lens, init_cash=init_cash, cash_sharing=cash_sharing, call_seq=None, segment_mask=segment_mask, call_pre_segment=call_pre_segment, call_post_segment=call_post_segment, close=close, ffill_val_price=ffill_val_price, update_value=update_value, fill_pos_record=fill_pos_record, flex_2d=flex_2d, order_records=order_records, log_records=log_records, last_cash=last_cash, last_position=last_position, last_debt=last_debt, last_free_cash=last_free_cash, last_val_price=last_val_price, last_value=last_value, second_last_value=second_last_value, last_return=last_return, last_oidx=last_oidx, last_lidx=last_lidx, last_pos_record=last_pos_record, group=group, group_len=group_len, from_col=from_col, to_col=to_col ) post_group_func_nb(post_group_ctx, *pre_sim_out, *post_group_args) from_col = to_col # Call function after the simulation post_sim_ctx = SimulationContext( target_shape=target_shape, group_lens=group_lens, init_cash=init_cash, cash_sharing=cash_sharing, call_seq=None, segment_mask=segment_mask, call_pre_segment=call_pre_segment, call_post_segment=call_post_segment, close=close, ffill_val_price=ffill_val_price, update_value=update_value, fill_pos_record=fill_pos_record, flex_2d=flex_2d, order_records=order_records, log_records=log_records, last_cash=last_cash, last_position=last_position, last_debt=last_debt, last_free_cash=last_free_cash, last_val_price=last_val_price, last_value=last_value, second_last_value=second_last_value, last_return=last_return, last_oidx=last_oidx, last_lidx=last_lidx, last_pos_record=last_pos_record ) post_sim_func_nb(post_sim_ctx, *post_sim_args) return order_records[:oidx], log_records[:lidx] @njit def flex_simulate_row_wise_nb(target_shape: tp.Shape, group_lens: tp.Array1d, init_cash: tp.Array1d, cash_sharing: bool, segment_mask: tp.ArrayLike = np.asarray(True), call_pre_segment: bool = False, call_post_segment: bool = False, pre_sim_func_nb: PreSimFuncT = no_pre_func_nb, pre_sim_args: tp.Args = (), post_sim_func_nb: PostSimFuncT = no_post_func_nb, post_sim_args: tp.Args = (), pre_row_func_nb: PreRowFuncT = no_pre_func_nb, pre_row_args: tp.Args = (), post_row_func_nb: PostRowFuncT = no_post_func_nb, post_row_args: tp.Args = (), pre_segment_func_nb: PreSegmentFuncT = no_pre_func_nb, pre_segment_args: tp.Args = (), post_segment_func_nb: PostSegmentFuncT = no_post_func_nb, post_segment_args: tp.Args = (), flex_order_func_nb: FlexOrderFuncT = no_flex_order_func_nb, flex_order_args: tp.Args = (), post_order_func_nb: 
PostOrderFuncT = no_post_func_nb, post_order_args: tp.Args = (), close: tp.ArrayLike = np.asarray(np.nan), ffill_val_price: bool = True, update_value: bool = False, fill_pos_record: bool = True, max_orders: tp.Optional[int] = None, max_logs: int = 0, flex_2d: bool = True) -> tp.Tuple[tp.RecordArray, tp.RecordArray]: """Same as `flex_simulate_nb`, but iterates in row-major order: each row is processed across all columns/groups before moving on to the next row.""" check_group_lens_nb(group_lens, target_shape[1]) check_group_init_cash_nb(group_lens, target_shape[1], init_cash, cash_sharing) order_records, log_records = init_records_nb(target_shape, max_orders, max_logs) init_cash = init_cash.astype(np.float_) last_cash = init_cash.copy() last_position = np.full(target_shape[1], 0., dtype=np.float_) last_debt = np.full(target_shape[1], 0., dtype=np.float_) last_free_cash = init_cash.copy()
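The simulator drives a user-supplied `flex_order_func_nb` until it returns `col == -1`, as seen in the main loop above. A minimal sketch of such a callback, assuming vectorbt's `order_nb`/`order_nothing_nb` helpers; the function name and the one-order-per-column policy are illustrative only:

```python
import numpy as np
from numba import njit
from vectorbt.portfolio.nb import order_nb, order_nothing_nb

@njit
def my_flex_order_func_nb(c, size):
    # Issue one market order per column of the current group, then signal
    # "no more orders for this segment" by returning col == -1.
    col = c.from_col + c.call_idx
    if col < c.to_col:
        # np.inf is replaced with the current close by replace_inf_price_nb.
        return col, order_nb(size=size, price=np.inf)
    return -1, order_nothing_nb()
```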
# File: swarmops/MetaOptimize.py ######################################################################## # SwarmOps - Heuristic optimization for Python. # Copyright (C) 2003-2016 <NAME>. # See the file README.md for instructions. # See the file LICENSE.txt for license details. # SwarmOps on the internet: http://www.Hvass-Labs.org/ ######################################################################## ######################################################################## # Classes used for tuning the control parameters of an optimizer. # # The basic idea is to have two layers of optimization, the optimizer # whose control parameters we wish to tune (e.g. PSO or DE), and a # meta-optimizer (typically LUS) for meta-optimizing those control # parameters. # # The meta-fitness measures the performance of the optimizer on several # optimization problems. It is basically just the sum of the best # fitness achieved in several optimization runs on each problem. # # To save execution time, the calculation of the meta-fitness may be # aborted pre-emptively if the fitness sum exceeds a limit. # This is known as Pre-Emptive Fitness Evaluation. # # Meta-optimization is described in detail in: # # [1] <NAME>. Tuning & Simplifying Heuristical Optimization (PhD thesis). # University of Southampton, School of Engineering Sciences. 2010 # http://www.hvass-labs.org/people/magnus/thesis/pedersen08thesis.pdf # ######################################################################## import numpy as np from swarmops.LogSolutions import LogSolutions from swarmops.Optimize import MultiRun from swarmops.Problem import Problem from swarmops.Timer import Timer from swarmops.LUS import LUS ######################################################################## class _ProblemRank: """ Helper-class for ranking and sorting the list of problems according to how well an optimization method performed on each problem. This is used with Pre-Emptive Fitness Evaluation for more quickly aborting the computation of the meta-fitness measure when possible. """ def __init__(self, problem, weight=1.0): """ Create object instance. :param problem: Instance of the Problem-class. :param weight: Weight for the problem. :return: Object instance. """ # Copy arguments to instance variables. self.problem = problem self.weight = weight # Initialize the fitness-sum. self.fitness_sum = 0.0 # Initialize the best-found solution and fitness. self.best = None self.best_fitness = np.Infinity def update_best(self, best, best_fitness): """ Update the best-found solution to the problem. :param best: Best-found solution from an optimization run. :param best_fitness: Fitness of the best-found solution. :return: Nothing. """ # If the fitness is an improvement over the best-known. if best_fitness < self.best_fitness: # Update the best-known solution and fitness. self.best = best self.best_fitness = best_fitness ######################################################################## class _MetaFitness(Problem): """ Used for measuring the performance of an optimization method on several problems. This is called the meta-fitness which can then be optimized by another overlaying optimizer which is called the meta-optimizer. """ def __init__(self, optimizer, problems, num_runs, max_evaluations, weights=None): """ Create object instance. :param optimizer: Optimizer-class, e.g. PSO, MOL or DE. :param problems: List of instances of the Problem-class. :param num_runs: Number of optimization runs to perform for each problem.
:param max_evaluations: Number of fitness evaluations for each optimization run. :param weights: List of weights for the problems to adjust their mutual importance. :return: Object instance. """ # Copy arguments to instance variables. self.optimizer = optimizer self.num_runs = num_runs self.max_evaluations = max_evaluations # Wrap the problems and weights. This is used for ranking the problems # which significantly speeds up the execution time, as explained below. if weights is None: # No weights were supplied so we just wrap the problems. self.problem_ranks = [_ProblemRank(problem) for problem in problems] else: # Wrap both the problems and weights. self.problem_ranks = [_ProblemRank(problem, weight) for problem, weight in zip(problems, weights)] # The MetaFitness-class is actually an optimization problem, # so init the parent-class. # The dimensionality of the search-space is the number of # control parameters for the optimizer, and the search-space # boundaries are the boundaries for the control parameters. Problem.__init__(self, name="MetaFitness", dim=optimizer.num_parameters, fitness_min=0.0, lower_bound=optimizer.parameters_lower_bound, upper_bound=optimizer.parameters_upper_bound) def fitness(self, x, limit=np.Infinity): """ Calculate the meta-fitness measure. :param x: Control parameters for the optimization method. :param limit: Abort the calculation of the meta-fitness when it becomes greater than this limit. :return: The meta-fitness measures how well the optimizer performed on the list of problems and using the given control parameters. """ # Start a timer so we can later print the time-usage. timer = Timer() # Convenience variables. optimizer = self.optimizer max_evaluations = self.max_evaluations # Initialize the meta-fitness to zero. # The meta-fitness is just the (adjusted) sum of the # fitness obtained on multiple optimization runs. fitness_sum = 0.0 # For each problem do the following. # Note that we iterate over self.problem_ranks which # is sorted so that we first try and optimize the problems # that are most likely to cause fitness_sum to exceed the # limit so the calculation can be aborted. This is called # Pre-Emptive Fitness Evaluation and greatly saves run-time. for problem_rank in self.problem_ranks: # Convenience variables. problem = problem_rank.problem weight = problem_rank.weight # Initialize the fitness sum for this problem. fitness_sum_inner = 0.0 # Perform a number of optimization runs on the problem. for i in range(self.num_runs): # Perform one optimization run on the given problem # using the given control parameters. result = optimizer(problem=problem, max_evaluations=max_evaluations, parameters=x) # Keep track of the best-found solution for this problem. problem_rank.update_best(best=result.best, best_fitness=result.best_fitness) # Adjust the fitness so it is non-negative. fitness_adjusted = result.best_fitness - problem.fitness_min # Print warning if adjusted fitness is negative. Due to tiny rounding # errors this might occur without being an issue. But if the adjusted # fitness is negative and large, then problem.fitness_min must be corrected # in order for Pre-Emptive Fitness Evaluation to work properly. # It is better to print a warning than to use an assert which would # stop the execution. if fitness_adjusted < 0.0: msg = "WARNING: MetaFitness.py, fitness_adjusted is negative {0:.4e} on problem {1}" print(msg.format(fitness_adjusted, problem.name)) # Accumulate the fitness sum for the inner-loop. 
fitness_sum_inner += weight * fitness_adjusted # Accumulate the overall fitness sum. fitness_sum += weight * fitness_adjusted # If the fitness sum exceeds the limit then break from the inner for-loop. if fitness_sum > limit: break # Update the problem's ranking with the fitness-sum. # This is the key used in sorting below. problem_rank.fitness_sum = fitness_sum_inner # If the fitness sum exceeds the limit then break from the outer for-loop. if fitness_sum > limit: break # Sort the problems using the fitness_sum as the key in descending order. # This increases the probability that the for-loops above can be # aborted pre-emptively the next time the meta-fitness is calculated. self.problem_ranks = sorted(self.problem_ranks, key=lambda rank: rank.fitness_sum, reverse=True) # Stop the timer. timer.stop() # Print various results so we can follow the progress. print("- Parameters tried: {0}".format(x)) msg = "- Meta-Fitness: {0:.4e}, Improvement: {1}" improvement = fitness_sum < limit print(msg.format(fitness_sum, improvement)) print("- Time-Usage: {0}".format(timer)) return fitness_sum ######################################################################## class MetaOptimize: """ Performs meta-optimization, that is, tuning of an optimizer's control parameters by using another overlaying optimizer. """ def __init__(self, optimizer, problems, num_runs, max_evaluations, meta_num_runs=5, meta_max_evaluations=None, weights=None, log_capacity=20, parallel=False): """ Create object instance and perform the meta-optimization. :param optimizer: Optimizer-class, e.g. PSO, MOL or DE. :param problems: List of instances of the Problem-class. :param num_runs: Number of optimization runs to perform for each problem. :param max_evaluations: Number of fitness evaluations for each optimization run. :param meta_num_runs: Number of runs for the meta-optimizer (default 5). :param meta_max_evaluations: Number of iterations for each run of the meta-optimizer. If None then it is set to 20 * number of control parameters for the optimizer. :param weights: List of weights for the problems to adjust their mutual importance when tuning the control parameters of the optimizer. If weights=None then the weight is set to 1.0 for all problems. :param log_capacity: How many of the best control parameters are logged. :param parallel: Execute the meta-optimization runs in parallel. If True then only the best-found control parameters are available afterwards, as the log of best parameters will be empty. The best-found solutions to the optimization problems will also be empty. See README.md for more details. :return: Object instance. Get the optimization results from the object's variables. - results.best are the best control parameters found for the optimizer. - results.best_fitness is the meta-fitness of the best-found parameters. - log.solutions holds several of the best control parameters. - meta_fitness.problem_rank is a list
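Putting the pieces together, a hedged usage sketch: tune PSO on two benchmark problems and read back the tuned control parameters. The class names `PSO`, `Sphere`, and `Rosenbrock`, their import paths, the constructor arguments, and the `results`/`log` attribute names are assumptions inferred from the docstrings above, not verified API:

```python
# Assumed imports -- module layout inferred from the imports above.
from swarmops.PSO import PSO
from swarmops.Problem import Sphere, Rosenbrock
from swarmops.MetaOptimize import MetaOptimize

problems = [Sphere(dim=10), Rosenbrock(dim=10)]

# 10 runs of 20000 evaluations per problem per meta-fitness evaluation.
meta = MetaOptimize(optimizer=PSO, problems=problems,
                    num_runs=10, max_evaluations=20000)

print(meta.results.best)          # best-found control parameters for PSO
print(meta.results.best_fitness)  # their meta-fitness
print(meta.log.solutions)         # log of the best parameters tried
```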
def _window_solve(N, A, total=None):
    # Shared body of every func_* variant below: the variants were
    # byte-identical apart from their argument lists and return values.
    # `getsum` is assumed to be defined elsewhere in the source module.
    if total is None:
        total = sum(A)
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    best = total - best
    return dict(A=A, total=total, totalsum=totalsum, best=best, a=a, b=b, i=i)

def _window_solve_prng(p, N, q, s, r):
    # Variants that first generate A from the linear-congruential rule.
    A = [((i * p + q) % r + s) for i in xrange(N)]
    return _window_solve(N, A)

def _window_solve_case(T, N, A):
    # The source's garbled `best('Case #%d' % T)` call was most plausibly a
    # lost print statement; it is restored as such here (an assumption).
    res = _window_solve(N, A)
    print('Case #%d' % T)
    return res

def _window_solve_ratio(T, N, total, A):
    # Same assumption for the variants that also report the best/total ratio.
    res = _window_solve(N, A, total=total)
    print('Case #%d' % T)
    print('Case #%d: %.10f' % (T, 1.0 * res['best'] / res['total']))
    return res

def func_47d3b72343c94709985a4ec5a9754219(p, N, q, s, r):
    return _window_solve_prng(p, N, q, s, r)['a']

def func_10c189a3fd28494689725a3fdc8f9891(p, N, q, s, r):
    return _window_solve_prng(p, N, q, s, r)['A']

def func_d43643d34ebd49c591a6a144a9caf44f(p, N, q, s, r):
    return _window_solve_prng(p, N, q, s, r)['best']

def func_c6d2018833c143c2bd608c67feb3227d(T, N, A):
    return _window_solve_case(T, N, A)['best']

def func_526fa2fc984f4447b08763398b048a81(T, N, A):
    return _window_solve_case(T, N, A)['total']

def func_3366a28a998f41dc933c3362dceb1430(T, N, A):
    return _window_solve_case(T, N, A)['totalsum']

def func_e61ed751ed6c464ea3c72675a48a505b(T, N, A):
    return _window_solve_case(T, N, A)['i']

def func_698f2fced81d49a7b92b09b9cc787229(T, N, A):
    return _window_solve_case(T, N, A)['a']

def func_2e985ed82f844b43a73426f35f0d4513(T, N, A):
    return _window_solve_case(T, N, A)['b']

def func_326a084b251c474aba3bf5157e019e23(T, N, total, A):
    return _window_solve_ratio(T, N, total, A)['i']

def func_83f866438efe4c8f894f8b44ba707abe(T, N, total, A):
    return _window_solve_ratio(T, N, total, A)['b']

def func_192877dd3f8542e0a895f6b9f318d9a9(T, N, total, A):
    return _window_solve_ratio(T, N, total, A)['a']

def func_6be1570d9df14bdeabe0c780d71063f1(T, N, total, A):
    return _window_solve_ratio(T, N, total, A)['totalsum']

def func_ea53fec8af8b41f69313c3e927e451d9(T, N, total, A):
    return _window_solve_ratio(T, N, total, A)['best']

def func_172a73f5168749b1ac20de58ee22ef30(infile):
    N, p, q, r, s = line(infile)
    A = [((i * p + q) % r + s) for i in xrange(N)]
    total = sum(A)
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total,
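`getsum` is called throughout the variants above but is not defined in this excerpt. A plausible reconstruction, offered purely as an assumption consistent with its call sites (`totalsum` is a prefix-sum array, and the final answer is `total - best`): it returns the part of the total left outside the inclusive window `A[a..b]`:

```python
def getsum(a, b, total, totalsum):
    # Sum of A[a..b] via prefix sums, subtracted from the grand total.
    # This is a guess at the helper's intent, not the original definition.
    window = totalsum[b] - (totalsum[a - 1] if a > 0 else 0)
    return total - window
```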
-offSize file.write(binOffset) for item in self.items: if hasattr(item, "toFile"): item.toFile(file) else: data = tobytes(item, encoding="latin1") file.write(data) class IndexedStringsCompiler(IndexCompiler): def getItems(self, items, strings): return items.strings class TopDictIndexCompiler(IndexCompiler): """Helper class for writing the TopDict to binary.""" def getItems(self, items, strings): out = [] for item in items: out.append(item.getCompiler(strings, self)) return out def getChildren(self, strings): children = [] for topDict in self.items: children.extend(topDict.getChildren(strings)) return children def getOffsets(self): if self.isCFF2: offsets = [0, self.items[0].getDataLength()] return offsets else: return super(TopDictIndexCompiler, self).getOffsets() def getDataLength(self): if self.isCFF2: dataLength = self.items[0].getDataLength() return dataLength else: return super(TopDictIndexCompiler, self).getDataLength() def toFile(self, file): if self.isCFF2: self.items[0].toFile(file) else: super(TopDictIndexCompiler, self).toFile(file) class FDArrayIndexCompiler(IndexCompiler): """Helper class for writing the `Font DICT INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#10-font-dict-index-font-dicts-and-fdselect>`_ to binary.""" def getItems(self, items, strings): out = [] for item in items: out.append(item.getCompiler(strings, self)) return out def getChildren(self, strings): children = [] for fontDict in self.items: children.extend(fontDict.getChildren(strings)) return children def toFile(self, file): offsets = self.getOffsets() if self.isCFF2: writeCard32(file, len(self.items)) else: writeCard16(file, len(self.items)) offSize = calcOffSize(offsets[-1]) writeCard8(file, offSize) offSize = -offSize pack = struct.pack for offset in offsets: binOffset = pack(">l", offset)[offSize:] assert len(binOffset) == -offSize file.write(binOffset) for item in self.items: if hasattr(item, "toFile"): item.toFile(file) else: file.write(item) def setPos(self, pos, endPos): self.parent.rawDict["FDArray"] = pos class GlobalSubrsCompiler(IndexCompiler): """Helper class for writing the `global subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_ to binary.""" def getItems(self, items, strings): out = [] for cs in items: cs.compile(self.isCFF2) out.append(cs.bytecode) return out class SubrsCompiler(GlobalSubrsCompiler): """Helper class for writing the `local subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_ to binary.""" def setPos(self, pos, endPos): offset = pos - self.parent.pos self.parent.rawDict["Subrs"] = offset class CharStringsCompiler(GlobalSubrsCompiler): """Helper class for writing the `CharStrings INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_ to binary.""" def getItems(self, items, strings): out = [] for cs in items: cs.compile(self.isCFF2) out.append(cs.bytecode) return out def setPos(self, pos, endPos): self.parent.rawDict["CharStrings"] = pos class Index(object): """This class represents what the CFF spec calls an INDEX (an array of variable-sized objects). 
`Index` items can be addressed and set using Python list indexing.""" compilerClass = IndexCompiler def __init__(self, file=None, isCFF2=None): assert (isCFF2 is None) == (file is None) self.items = [] name = self.__class__.__name__ if file is None: return self._isCFF2 = isCFF2 log.log(DEBUG, "loading %s at %s", name, file.tell()) self.file = file if isCFF2: count = readCard32(file) else: count = readCard16(file) if count == 0: return self.items = [None] * count offSize = readCard8(file) log.log(DEBUG, " index count: %s offSize: %s", count, offSize) assert offSize <= 4, "offSize too large: %s" % offSize self.offsets = offsets = [] pad = b'\0' * (4 - offSize) for index in range(count + 1): chunk = file.read(offSize) chunk = pad + chunk offset, = struct.unpack(">L", chunk) offsets.append(int(offset)) self.offsetBase = file.tell() - 1 file.seek(self.offsetBase + offsets[-1]) # pretend we've read the whole lot log.log(DEBUG, " end of %s at %s", name, file.tell()) def __len__(self): return len(self.items) def __getitem__(self, index): item = self.items[index] if item is not None: return item offset = self.offsets[index] + self.offsetBase size = self.offsets[index + 1] - self.offsets[index] file = self.file file.seek(offset) data = file.read(size) assert len(data) == size item = self.produceItem(index, data, file, offset) self.items[index] = item return item def __setitem__(self, index, item): self.items[index] = item def produceItem(self, index, data, file, offset): return data def append(self, item): """Add an item to an INDEX.""" self.items.append(item) def getCompiler(self, strings, parent, isCFF2=None): return self.compilerClass(self, strings, parent, isCFF2=isCFF2) def clear(self): """Empty the INDEX.""" del self.items[:] class GlobalSubrsIndex(Index): """This index contains all the global subroutines in the font. A global subroutine is a set of ``CharString`` data which is accessible to any glyph in the font, and are used to store repeated instructions - for example, components may be encoded as global subroutines, but so could hinting instructions. Remember that when interpreting a ``callgsubr`` instruction (or indeed a ``callsubr`` instruction) that you will need to add the "subroutine number bias" to number given: .. code:: python tt = ttLib.TTFont("Almendra-Bold.otf") u = tt["CFF "].cff[0].CharStrings["udieresis"] u.decompile() u.toXML(XMLWriter(sys.stdout)) # <some stuff> # -64 callgsubr <-- Subroutine which implements the dieresis mark # <other stuff> tt["CFF "].cff[0].GlobalSubrs[-64] # <-- WRONG # <T2CharString (bytecode) at 103451d10> tt["CFF "].cff[0].GlobalSubrs[-64 + 107] # <-- RIGHT # <T2CharString (source) at 103451390> ("The bias applied depends on the number of subrs (gsubrs). If the number of subrs (gsubrs) is less than 1240, the bias is 107. 
Otherwise if it is less than 33900, it is 1131; otherwise it is 32768.", `Subroutine Operators <https://docs.microsoft.com/en-us/typography/opentype/otspec180/cff2charstr#section4.4>`) """ compilerClass = GlobalSubrsCompiler subrClass = psCharStrings.T2CharString charStringClass = psCharStrings.T2CharString def __init__(self, file=None, globalSubrs=None, private=None, fdSelect=None, fdArray=None, isCFF2=None): super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2) self.globalSubrs = globalSubrs self.private = private if fdSelect: self.fdSelect = fdSelect if fdArray: self.fdArray = fdArray def produceItem(self, index, data, file, offset): if self.private is not None: private = self.private elif hasattr(self, 'fdArray') and self.fdArray is not None: if hasattr(self, 'fdSelect') and self.fdSelect is not None: fdIndex = self.fdSelect[index] else: fdIndex = 0 private = self.fdArray[fdIndex].Private else: private = None return self.subrClass(data, private=private, globalSubrs=self.globalSubrs) def toXML(self, xmlWriter): """Write the subroutines index into XML representation onto the given :class:`fontTools.misc.xmlWriter.XMLWriter`. .. code:: python writer = xmlWriter.XMLWriter(sys.stdout) tt["CFF "].cff[0].GlobalSubrs.toXML(writer) """ xmlWriter.comment( "The 'index' attribute is only for humans; " "it is ignored when parsed.") xmlWriter.newline() for i in range(len(self)): subr = self[i] if subr.needsDecompilation(): xmlWriter.begintag("CharString", index=i, raw=1) else: xmlWriter.begintag("CharString", index=i) xmlWriter.newline() subr.toXML(xmlWriter) xmlWriter.endtag("CharString") xmlWriter.newline() def fromXML(self, name, attrs, content): if name != "CharString": return subr = self.subrClass() subr.fromXML(name, attrs, content) self.append(subr) def getItemAndSelector(self, index): sel = None if hasattr(self, 'fdSelect'): sel = self.fdSelect[index] return self[index], sel class SubrsIndex(GlobalSubrsIndex): """This index contains a glyph's local subroutines. A local subroutine is a private set of ``CharString`` data which is accessible only to the glyph to which the index is attached.""" compilerClass = SubrsCompiler class TopDictIndex(Index): """This index represents the array of ``TopDict`` structures in the font (again, usually only one entry is present). Hence the following calls are equivalent: .. 
code:: python tt["CFF "].cff[0] # <fontTools.cffLib.TopDict object at 0x102ed6e50> tt["CFF "].cff.topDictIndex[0] # <fontTools.cffLib.TopDict object at 0x102ed6e50> """ compilerClass = TopDictIndexCompiler def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0, isCFF2=None): assert (isCFF2 is None) == (file is None) self.cff2GetGlyphOrder = cff2GetGlyphOrder if file is not None and isCFF2: self._isCFF2 = isCFF2 self.items = [] name = self.__class__.__name__ log.log(DEBUG, "loading %s at %s", name, file.tell()) self.file = file count = 1 self.items = [None] * count self.offsets = [0, topSize] self.offsetBase = file.tell() # pretend we've read the whole lot file.seek(self.offsetBase + topSize) log.log(DEBUG, " end of %s at %s", name, file.tell()) else: super(TopDictIndex, self).__init__(file, isCFF2=isCFF2) def produceItem(self, index, data, file, offset): top = TopDict( self.strings, file, offset, self.GlobalSubrs, self.cff2GetGlyphOrder, isCFF2=self._isCFF2) top.decompile(data) return top def toXML(self, xmlWriter): for i in range(len(self)): xmlWriter.begintag("FontDict", index=i) xmlWriter.newline() self[i].toXML(xmlWriter) xmlWriter.endtag("FontDict") xmlWriter.newline() class FDArrayIndex(Index): compilerClass = FDArrayIndexCompiler def toXML(self, xmlWriter): for i in range(len(self)): xmlWriter.begintag("FontDict", index=i) xmlWriter.newline() self[i].toXML(xmlWriter) xmlWriter.endtag("FontDict") xmlWriter.newline() def produceItem(self, index, data, file, offset): fontDict = FontDict( self.strings, file, offset, self.GlobalSubrs, isCFF2=self._isCFF2, vstore=self.vstore) fontDict.decompile(data) return fontDict def fromXML(self, name, attrs, content): if name != "FontDict": return fontDict = FontDict() for element in content: if isinstance(element, str): continue name, attrs, content = element fontDict.fromXML(name, attrs, content) self.append(fontDict) class VarStoreData(object): def __init__(self, file=None, otVarStore=None): self.file = file self.data = None self.otVarStore = otVarStore self.font = TTFont() # dummy font for the decompile function. def decompile(self): if self.file: class GlobalState(object): def __init__(self, tableType, cachingStats): self.tableType = tableType self.cachingStats = cachingStats globalState = GlobalState(tableType="VarStore", cachingStats={}) # read data in from file. Assume position is correct. 
length = readCard16(self.file) self.data = self.file.read(length) globalState = {} reader = OTTableReader(self.data, globalState) self.otVarStore = ot.VarStore() self.otVarStore.decompile(reader, self.font) return self def compile(self): writer = OTTableWriter() self.otVarStore.compile(writer, self.font) # Note that this omits the initial Card16 length from the CFF2 # VarStore data block self.data = writer.getAllData() def writeXML(self, xmlWriter, name): self.otVarStore.toXML(xmlWriter, self.font) def xmlRead(self, name, attrs, content, parent): self.otVarStore = ot.VarStore() for element in content: if isinstance(element, tuple): name, attrs, content = element self.otVarStore.fromXML(name, attrs, content, self.font) else: pass return None def __len__(self): return len(self.data) def getNumRegions(self, vsIndex): varData = self.otVarStore.VarData[vsIndex] numRegions = varData.VarRegionCount return numRegions class FDSelect(object): def __init__(self, file=None, numGlyphs=None, format=None): if file: # read data in from file self.format = readCard8(file) if self.format == 0: from array import array self.gidArray = array("B", file.read(numGlyphs)).tolist() elif self.format == 3: gidArray = [None] * numGlyphs nRanges = readCard16(file) fd = None prev = None for i in range(nRanges): first = readCard16(file) if prev is not None: for glyphID in range(prev, first): gidArray[glyphID] = fd prev = first fd = readCard8(file) if prev is not None: first = readCard16(file) for glyphID in range(prev, first): gidArray[glyphID] = fd self.gidArray = gidArray elif self.format == 4: gidArray = [None] * numGlyphs nRanges = readCard32(file) fd = None prev = None for i in range(nRanges): first = readCard32(file) if prev is not None: for glyphID in range(prev, first): gidArray[glyphID] = fd prev = first fd = readCard16(file) if prev is not None: first = readCard32(file) for glyphID in range(prev, first): gidArray[glyphID] = fd self.gidArray = gidArray else: assert False, "unsupported FDSelect format: %s" % format else: # reading from XML. Make empty gidArray, and leave format as passed in. # format is None will result in the smallest representation being used. self.format = format self.gidArray = [] def __len__(self): return len(self.gidArray) def __getitem__(self, index): return self.gidArray[index] def __setitem__(self, index, fdSelectValue): self.gidArray[index] = fdSelectValue def append(self, fdSelectValue): self.gidArray.append(fdSelectValue) class CharStrings(object): """The ``CharStrings`` in the font represent the instructions for drawing each glyph. This object presents a dictionary interface to the font's CharStrings, indexed by glyph name: .. code:: python tt["CFF "].cff[0].CharStrings["a"] # <T2CharString (bytecode) at 103451e90> See :class:`fontTools.misc.psCharStrings.T1CharString` and :class:`fontTools.misc.psCharStrings.T2CharString` for how to decompile, compile and interpret the glyph drawing instructions in the returned objects. """ def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray, isCFF2=None): self.globalSubrs = globalSubrs if file is not None: self.charStringsIndex = SubrsIndex( file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2) self.charStrings = charStrings = {} for i in range(len(charset)): charStrings[charset[i]] = i # read from OTF file: charStrings.values() are indices into # charStringsIndex. 
self.charStringsAreIndexed = 1 else: self.charStrings = {} # read from ttx file: charStrings.values() are actual charstrings self.charStringsAreIndexed = 0 self.private = private if fdSelect is not None: self.fdSelect = fdSelect if fdArray is not None: self.fdArray = fdArray def keys(self): return list(self.charStrings.keys()) def values(self): if self.charStringsAreIndexed: return self.charStringsIndex else: return list(self.charStrings.values()) def has_key(self, name): return name in self.charStrings __contains__ = has_key def __len__(self): return len(self.charStrings) def __getitem__(self, name): charString = self.charStrings[name] if self.charStringsAreIndexed: charString = self.charStringsIndex[charString] return charString def __setitem__(self, name, charString): if self.charStringsAreIndexed: index = self.charStrings[name] self.charStringsIndex[index] = charString else: self.charStrings[name] = charString def getItemAndSelector(self, name): if self.charStringsAreIndexed: index = self.charStrings[name] return self.charStringsIndex.getItemAndSelector(index) else: if hasattr(self, 'fdArray'): if hasattr(self, 'fdSelect'): sel = self.charStrings[name].fdSelectIndex else: sel = 0 else: sel = None return self.charStrings[name], sel def toXML(self, xmlWriter): names = sorted(self.keys()) for name in names: charStr, fdSelectIndex = self.getItemAndSelector(name) if charStr.needsDecompilation(): raw = [("raw", 1)] else: raw = [] if fdSelectIndex is None: xmlWriter.begintag("CharString", [('name', name)] + raw) else: xmlWriter.begintag( "CharString", [('name', name), ('fdSelectIndex', fdSelectIndex)]
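# As a quick illustration of the dictionary interface described above -- a
# minimal sketch, assuming a CFF-flavoured OpenType font at the hypothetical
# path "MyFont.otf":
from fontTools.ttLib import TTFont

font = TTFont("MyFont.otf")  # hypothetical path
charstrings = font["CFF "].cff[0].CharStrings

# dictionary-style access by glyph name
print("a" in charstrings, len(charstrings))
glyph = charstrings["a"]

# decompile the bytecode to inspect the Type 2 charstring program
glyph.decompile()
print(glyph.program[:10])

# getItemAndSelector also returns the FDArray index for CID-keyed fonts
# (the selector is None for non-CID fonts, per the code above)
charstring, fd_index = charstrings.getItemAndSelector("a")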
"""This module provides ``docutils.nodes.GenericNodeVisitor`` subclasses, which generates JSONable, 'database friendly', information about the document. This can be used for fast-lookup of the position of elements in the document, and to reference/target mappings. The visitor should be run via the ``LSPTransform`` class:: transform = LSPTransform(document) transform.apply(source_content) """ import logging from typing import List, Optional import uuid try: from typing import TypedDict except ImportError: from typing_extensions import TypedDict from docutils import nodes from docutils.transforms import Transform from rst_lsp.docutils_ext.inliner_lsp import LSPInline from rst_lsp.docutils_ext.block_lsp import LSPDirective, LSPBlockTarget, LSPSection from rst_lsp.server.constants import SymbolKind from rst_lsp.server.datatypes import DocumentSymbol logger = logging.getLogger(__name__) class LSPTransform(Transform): default_priority = 1 def __init__(self, document, startnode=None): super().__init__(document, startnode=startnode) self._visitor_ref = None self._visitor_lsp = None @property def db_positions(self): if self._visitor_lsp is None: raise AttributeError("must call `apply` first") return self._visitor_lsp.db_positions @property def db_references(self): if self._visitor_lsp is None: raise AttributeError("must call `apply` first") return self._visitor_lsp.db_references @property def db_pending_xrefs(self): if self._visitor_lsp is None: raise AttributeError("must call `apply` first") return self._visitor_lsp.db_pending_refs @property def db_targets(self): if self._visitor_lsp is None: raise AttributeError("must call `apply` first") return self._visitor_lsp.db_targets @property def db_doc_symbols(self): if self._visitor_lsp is None: raise AttributeError("must call `apply` first") return self._visitor_lsp.nesting.db_doc_symbols def apply(self, source_content): remove = [] try: for node in [LSPInline, LSPDirective, LSPBlockTarget, LSPSection]: name = node.__name__ if not hasattr(nodes.GenericNodeVisitor, "visit_" + node.__name__): setattr( nodes.GenericNodeVisitor, "visit_" + name, nodes._call_default_visit, ) setattr( nodes.GenericNodeVisitor, "depart_" + name, nodes._call_default_departure, ) remove.append(name) self._visitor_ref = VisitorRef2Target(self.document) self.document.walk(self._visitor_ref) self._visitor_lsp = VisitorLSP(self.document, source_content) self.document.walkabout(self._visitor_lsp) finally: for name in remove: delattr(nodes.GenericNodeVisitor, "visit_" + name) delattr(nodes.GenericNodeVisitor, "depart_" + name) class VisitorRef2Target(nodes.GenericNodeVisitor): """Visitor to link references to their targets. This is adapted from the code in ``transforms.references.Substitutions``, ``transforms.references.AnonymousHyperlinks``, ``transforms.references.Footnotes``, ``transforms.references.ExternalTargets``, and ``transforms.references.InternalTargets``. 
""" def __init__(self, document: nodes.document): super().__init__(document) self.document = self.document # type: nodes.document # TODO handle anonymous in VisitorLSP self.anonymous_targets = [] self.anonymous_refs = [] # assign ids to substitution definitions for sub_def_node in self.document.substitution_defs.values(): sub_def_node["target_uuid"] = self.get_uuid() # assign ids to citation definitions for citation_node in self.document.citations: citation_id = self.get_uuid() citation_node["target_uuid"] = citation_id for label in citation_node["names"]: if label in self.document.citation_refs: for refnode in self.document.citation_refs[label]: if "citerefid" not in refnode: refnode["citerefid"] = citation_id # assign ids to footnote definitions for footnode_node in self.document.footnotes: foot_id = self.get_uuid() footnode_node["target_uuid"] = foot_id for label in footnode_node["names"]: if label in self.document.footnote_refs: for refnode in self.document.footnote_refs[label]: if "footrefid" not in refnode: refnode["footrefid"] = foot_id # TODO assign ids to auto-numbered / symbol footnote definitions def get_uuid(self): return str(uuid.uuid4()) def visit_target(self, node): targetid = self.get_uuid() node["target_uuid"] = targetid if node.get("anonymous"): self.anonymous_targets.append(node) return for name in node["names"]: reflist = self.document.refnames.get(name, []) for ref in reflist: if "targetrefid" not in ref: ref["targetrefid"] = targetid def visit_reference(self, node): if node.get("anonymous"): self.anonymous_refs.append(node) def visit_substitution_reference(self, node): refname = node["refname"] key = None if refname in self.document.substitution_defs: key = refname else: normed_name = refname.lower() # Mapping of case-normalized substitution names to case-sensitive names. key = self.document.substitution_names.get(normed_name, None) if key is None: self.document.reporter.warning( f'Undefined substitution referenced: "{refname}".', base_node=node ) node["subrefid"] = None else: node["subrefid"] = self.document.substitution_defs[key]["target_uuid"] def visit_citation_reference(self, node): if "citerefid" not in node: refname = node["refname"] classes = node.get("classes", None) if not node.get("classes", None): # sphinxcontrib-bibtex for example uses a citation_reference, # with node["classes"] = ["bibtex"] # and we don't want to raise warnings for this type of citation_reference self.document.reporter.warning( f'Undefined citation referenced: "{refname}" {classes}.', base_node=node, ) node["citerefid"] = None def add_target_uuid(self, node): if "names" in node and node["names"] and "target_uuid" not in node: node["target_uuid"] = self.get_uuid() # def visit_image(self, node): # self.add_target_uuid(node) # def visit_figure(self, node): # self.add_target_uuid(node) # def visit_table(self, node): # self.add_target_uuid(node) # def visit_literal_block(self, node): # self.add_target_uuid(node) # def visit_math_block(self, node): # if "label" in node: # node["target_uuid"] = self.get_uuid() # name = node["label"] def default_visit(self, node): """Override for generic, uniform traversals.""" # TODO ignore auto-numbered footnotes? self.add_target_uuid(node) def unknown_visit(self, node): """Override for generic, uniform traversals.""" pass class DBElement(TypedDict): uuid: str parent_uuid: Optional[str] block: bool category: str title: str startLine: int startCharacter: int endLine: int endCharacter: int # then element specific data # TODO use TypedDict with undefined keys? 
ELEMENT2KIND = { # inlines "ref_basic": SymbolKind.Property, "ref_anon": SymbolKind.Property, "ref_cite": SymbolKind.Property, "ref_foot": SymbolKind.Property, "ref_sub": SymbolKind.Property, "ref_phrase": SymbolKind.Property, "target_inline": SymbolKind.Field, "role": SymbolKind.Function, # blocks "section": SymbolKind.Module, "directive": SymbolKind.Class, "footnote": SymbolKind.Field, "citation": SymbolKind.Field, "hyperlink_target": SymbolKind.Field, "substitution_def": SymbolKind.Field, } class NestedElements: """This class keeps a record of the current elements entered.""" def __init__(self): self._entered_uuid = [] self._doc_symbols = [] # type: List[DocumentSymbol] def enter_block(self, node, data: DBElement): # logger.debug(f"entering node: {node}") uuid_value = data["uuid"] node.uuid_value = uuid_value # this is used to check consistency of exits self._add_doc_symbols(data) self._entered_uuid.append(uuid_value) def exit_block(self, node): # logger.debug(f"exiting node: {node}") try: if self._entered_uuid[-1] != node.uuid_value: raise AssertionError("Exiting a non-leaf element") except AttributeError: raise AssertionError("node property 'uuid_value' not set") self._entered_uuid.pop() del node.uuid_value def add_inline(self, data: DBElement): self._add_doc_symbols(data) def _add_doc_symbols(self, data: DBElement): current_parent = self._doc_symbols for _ in self._entered_uuid: current_parent = current_parent[-1].setdefault("children", []) current_parent.append( { "name": data["title"], "detail": f'type: {data["category"]}', "kind": ELEMENT2KIND.get(data["category"], SymbolKind.Constant), "range": { "start": { "line": data["startLine"], "character": data["startCharacter"], }, "end": {"line": data["endLine"], "character": data["endCharacter"]}, }, # TODO only select first line? 
"selectionRange": { "start": { "line": data["startLine"], "character": data["startCharacter"], }, "end": {"line": data["endLine"], "character": data["endCharacter"]}, }, } ) @property def parent_uuid(self): return None if not self._entered_uuid else self._entered_uuid[-1] @property def db_doc_symbols(self) -> List[DocumentSymbol]: return self._doc_symbols class VisitorLSP(nodes.GenericNodeVisitor): """Extract information, to generate data for Language Service Providers.""" def __init__(self, document, source): super().__init__(document) self.source_lines = source.splitlines() self.db_positions = [] self.db_references = [] self.db_pending_refs = [] self.db_targets = [] self.nesting = NestedElements() self.current_inline = None # TODO add option to remove LSP nodes def get_uuid(self): return str(uuid.uuid4()) def get_block_range(self, start_indx, end_indx, indent_start=True): """Return the range of a block.""" start_line = self.source_lines[start_indx] start_column = 0 if indent_start: start_column = len(start_line) - len(start_line.lstrip()) last_indx = len(self.source_lines) - 1 end_indx = last_indx if end_indx > last_indx else end_indx end_column = len(self.source_lines[end_indx]) - 1 end_column = 0 if end_column < 0 else end_column return (start_indx, start_column, end_indx, end_column) def visit_LSPSection(self, node): if node.line_end is not None: start_indx, start_column, end_indx, end_column = self.get_block_range( node.line_start, node.line_end ) uuid_value = self.get_uuid() data = { "uuid": uuid_value, "title": node.title, "parent_uuid": self.nesting.parent_uuid, "block": True, "category": "section", "startLine": start_indx, "startCharacter": start_column, "endLine": end_indx, "endCharacter": end_column, "section_level": node.level, } self.db_positions.append(data) self.nesting.enter_block(node, data) def visit_LSPDirective(self, node): start_indx, start_column, end_indx, end_column = self.get_block_range( node.line_start, node.line_end ) uuid_value = self.get_uuid() data = { "uuid": uuid_value, "title": node.dname, "parent_uuid": self.nesting.parent_uuid, "block": True, "category": "directive", "startLine": start_indx, "startCharacter": start_column, "endLine": end_indx, "endCharacter": end_column, "directive_name": node.dname, "directive_data": { "contentLine": node.line_content, "contentIndent": node.content_indent + start_column if node.content_indent else None, "arguments": node.arguments, "options": node.options, "klass": node.klass, }, } self.db_positions.append(data) self.nesting.enter_block(node, data) def visit_LSPBlockTarget(self, node): start_indx, start_column, end_indx, end_column = self.get_block_range( node.line_start, node.line_end ) uuid_value = self.get_uuid() data = { "uuid": uuid_value, "title": node.etype, "parent_uuid": self.nesting.parent_uuid, "block": True, "category": node.etype, "startLine": start_indx, "startCharacter": start_column, "endLine": end_indx, "endCharacter": end_column, } self.db_positions.append(data) self.nesting.enter_block(node, data) def visit_LSPInline(self, node): sline, scol, eline, ecol = node.attributes["position"] data = { "uuid": self.get_uuid(), "title": node.attributes["type"], "parent_uuid": self.nesting.parent_uuid, "block": False, "category": node.attributes["type"], "startLine": sline, "startCharacter": scol, "endLine": eline, "endCharacter": ecol, } if "role" in node.attributes: data["title"] = node.attributes["role"] data["role_name"] = node.attributes["role"] self.current_inline = data["uuid"] 
self.db_positions.append(data) self.nesting.add_inline(data) def depart_LSPSection(self, node): if node.line_end is not None: self.nesting.exit_block(node) def depart_LSPDirective(self, node): self.nesting.exit_block(node) def depart_LSPBlockTarget(self, node): self.nesting.exit_block(node) def depart_LSPInline(self, node): self.current_inline = None def visit_pending_xref(self, node): """deal with roles like ``:ref:`` and ``:numref:``""" parent_uuid = None if self.current_inline is not None: parent_uuid = self.current_inline elif self.nesting.parent_uuid is not None: parent_uuid = self.nesting.parent_uuid if parent_uuid is not None: data = { "position_uuid": parent_uuid, "node_type": node.__class__.__name__, "classes": node.get("classes", []), } for name in ("refdomain", "refexplicit", "reftarget", "reftype", "refwarn"): data[name] = node[name] self.db_pending_refs.append(data) def default_visit(self, node): parent_uuid = None if self.current_inline is not None: parent_uuid = self.current_inline elif self.nesting.parent_uuid is not None: parent_uuid = self.nesting.parent_uuid if parent_uuid is not None: # TODO record additional sphinx target nodes, like math_block's with label if "target_uuid" in node and node["target_uuid"]: self.db_targets.append( { "position_uuid": parent_uuid, "node_type": node.__class__.__name__, "classes": node.get("classes", []), "names": node.get("names", []), "uuid": node["target_uuid"], } ) for ref_attr in ("footrefid", "citerefid", "targetrefid", "subrefid"): if ref_attr in node and not node.get("classes", []): # bibtex/glossary extension identify themselves with
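# To make the nesting behaviour concrete, here is a minimal sketch of how
# NestedElements (defined above) builds the DocumentSymbol tree, using
# types.SimpleNamespace objects as stand-ins for docutils nodes:
from types import SimpleNamespace

nesting = NestedElements()

section = SimpleNamespace()  # stand-in for an LSPSection node
nesting.enter_block(section, {
    "uuid": "s1", "title": "Introduction", "category": "section",
    "startLine": 0, "startCharacter": 0, "endLine": 10, "endCharacter": 0,
})

# an inline element recorded while the section is open becomes its child
nesting.add_inline({
    "uuid": "r1", "title": "ref", "category": "ref_basic",
    "startLine": 2, "startCharacter": 4, "endLine": 2, "endCharacter": 12,
})

nesting.exit_block(section)
assert nesting.db_doc_symbols[0]["name"] == "Introduction"
assert nesting.db_doc_symbols[0]["children"][0]["name"] == "ref"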
16) CalculatedFrameList = int('00081162', 16) TimeRange = int('00081163', 16) FrameExtractionSequence = int('00081164', 16) MultiframeSourceSOPInstanceUID = int('00081167', 16) RetrieveURL = int('00081190', 16) TransactionUID = int('00081195', 16) WarningReason = int('00081196', 16) FailureReason = int('00081197', 16) FailedSOPSequence = int('00081198', 16) ReferencedSOPSequence = int('00081199', 16) OtherFailuresSequence = int('0008119A', 16) StudiesContainingOtherReferencedInstancesSequence = int('00081200', 16) RelatedSeriesSequence = int('00081250', 16) LossyImageCompressionRetired = int('00082110', 16) DerivationDescription = int('00082111', 16) SourceImageSequence = int('00082112', 16) StageName = int('00082120', 16) StageNumber = int('00082122', 16) NumberofStages = int('00082124', 16) ViewName = int('00082127', 16) ViewNumber = int('00082128', 16) NumberofEventTimers = int('00082129', 16) NumberofViewsinStage = int('0008212A', 16) EventElapsedTimes = int('00082130', 16) EventTimerNames = int('00082132', 16) EventTimerSequence = int('00082133', 16) EventTimeOffset = int('00082134', 16) EventCodeSequence = int('00082135', 16) StartTrim = int('00082142', 16) StopTrim = int('00082143', 16) RecommendedDisplayFrameRate = int('00082144', 16) TransducerPosition = int('00082200', 16) TransducerOrientation = int('00082204', 16) AnatomicStructure = int('00082208', 16) AnatomicRegionSequence = int('00082218', 16) AnatomicRegionModifierSequence = int('00082220', 16) PrimaryAnatomicStructureSequence = int('00082228', 16) AnatomicStructureSpaceorRegionSequence = int('00082229', 16) PrimaryAnatomicStructureModifierSequence = int('00082230', 16) TransducerPositionSequence = int('00082240', 16) TransducerPositionModifierSequence = int('00082242', 16) TransducerOrientationSequence = int('00082244', 16) TransducerOrientationModifierSequence = int('00082246', 16) AnatomicStructureSpaceOrRegionCodeSequenceTrial = int('00082251', 16) AnatomicPortalOfEntranceCodeSequenceTrial = int('00082253', 16) AnatomicApproachDirectionCodeSequenceTrial = int('00082255', 16) AnatomicPerspectiveDescriptionTrial = int('00082256', 16) AnatomicPerspectiveCodeSequenceTrial = int('00082257', 16) AnatomicLocationOfExaminingInstrumentDescriptionTrial = int('00082258', 16) AnatomicLocationOfExaminingInstrumentCodeSequenceTrial = int( '00082259', 16) AnatomicStructureSpaceOrRegionModifierCodeSequenceTrial = int( '0008225A', 16) OnAxisBackgroundAnatomicStructureCodeSequenceTrial = int('0008225C', 16) AlternateRepresentationSequence = int('00083001', 16) IrradiationEventUID = int('00083010', 16) SourceIrradiationEventSequence = int('00083011', 16) RadiopharmaceuticalAdministrationEventUID = int('00083012', 16) IdentifyingComments = int('00084000', 16) FrameType = int('00089007', 16) ReferencedImageEvidenceSequence = int('00089092', 16) ReferencedRawDataSequence = int('00089121', 16) CreatorVersionUID = int('00089123', 16) DerivationImageSequence = int('00089124', 16) SourceImageEvidenceSequence = int('00089154', 16) PixelPresentation = int('00089205', 16) VolumetricProperties = int('00089206', 16) VolumeBasedCalculationTechnique = int('00089207', 16) ComplexImageComponent = int('00089208', 16) AcquisitionContrast = int('00089209', 16) DerivationCodeSequence = int('00089215', 16) ReferencedPresentationStateSequence = int('00089237', 16) ReferencedOtherPlaneSequence = int('00089410', 16) FrameDisplaySequence = int('00089458', 16) RecommendedDisplayFrameRateinFloat = int('00089459', 16) SkipFrameRangeFlag = int('00089460', 16) 
PatientsName = int('00100010', 16) PatientID = int('00100020', 16) IssuerofPatientID = int('00100021', 16) TypeofPatientID = int('00100022', 16) IssuerofPatientIDQualifiersSequence = int('00100024', 16) SourcePatientGroupIdentificationSequence = int('00100026', 16) GroupofPatientsIdentificationSequence = int('00100027', 16) SubjectRelativePositioninImage = int('00100028', 16) PatientsBirthDate = int('00100030', 16) PatientsBirthTime = int('00100032', 16) PatientsBirthDateinAlternativeCalendar = int('00100033', 16) PatientsDeathDateinAlternativeCalendar = int('00100034', 16) PatientsAlternativeCalendar = int('00100035', 16) PatientsSex = int('00100040', 16) PatientsInsurancePlanCodeSequence = int('00100050', 16) PatientsPrimaryLanguageCodeSequence = int('00100101', 16) PatientsPrimaryLanguageModifierCodeSequence = int('00100102', 16) QualityControlSubject = int('00100200', 16) QualityControlSubjectTypeCodeSequence = int('00100201', 16) StrainDescription = int('00100212', 16) StrainNomenclature = int('00100213', 16) StrainStockNumber = int('00100214', 16) StrainSourceRegistryCodeSequence = int('00100215', 16) StrainStockSequence = int('00100216', 16) StrainSource = int('00100217', 16) StrainAdditionalInformation = int('00100218', 16) StrainCodeSequence = int('00100219', 16) OtherPatientIDs = int('00101000', 16) OtherPatientNames = int('00101001', 16) OtherPatientIDsSequence = int('00101002', 16) PatientsBirthName = int('00101005', 16) PatientsAge = int('00101010', 16) PatientsSize = int('00101020', 16) PatientsSizeCodeSequence = int('00101021', 16) PatientsWeight = int('00101030', 16) PatientsAddress = int('00101040', 16) InsurancePlanIdentification = int('00101050', 16) PatientsMothersBirthName = int('00101060', 16) MilitaryRank = int('00101080', 16) BranchofService = int('00101081', 16) MedicalRecordLocator = int('00101090', 16) ReferencedPatientPhotoSequence = int('00101100', 16) MedicalAlerts = int('00102000', 16) Allergies = int('00102110', 16) CountryofResidence = int('00102150', 16) RegionofResidence = int('00102152', 16) PatientsTelephoneNumbers = int('00102154', 16) PatientsTelecomInformation = int('00102155', 16) EthnicGroup = int('00102160', 16) Occupation = int('00102180', 16) SmokingStatus = int('001021A0', 16) AdditionalPatientHistory = int('001021B0', 16) PregnancyStatus = int('001021C0', 16) LastMenstrualDate = int('001021D0', 16) PatientsReligiousPreference = int('001021F0', 16) PatientSpeciesDescription = int('00102201', 16) PatientSpeciesCodeSequence = int('00102202', 16) PatientsSexNeutered = int('00102203', 16) AnatomicalOrientationType = int('00102210', 16) PatientBreedDescription = int('00102292', 16) PatientBreedCodeSequence = int('00102293', 16) BreedRegistrationSequence = int('00102294', 16) BreedRegistrationNumber = int('00102295', 16) BreedRegistryCodeSequence = int('00102296', 16) ResponsiblePerson = int('00102297', 16) ResponsiblePersonRole = int('00102298', 16) ResponsibleOrganization = int('00102299', 16) PatientComments = int('00104000', 16) ExaminedBodyThickness = int('00109431', 16) ClinicalTrialSponsorName = int('00120010', 16) ClinicalTrialProtocolID = int('00120020', 16) ClinicalTrialProtocolName = int('00120021', 16) ClinicalTrialSiteID = int('00120030', 16) ClinicalTrialSiteName = int('00120031', 16) ClinicalTrialSubjectID = int('00120040', 16) ClinicalTrialSubjectReadingID = int('00120042', 16) ClinicalTrialTimePointID = int('00120050', 16) ClinicalTrialTimePointDescription = int('00120051', 16) ClinicalTrialCoordinatingCenterName = int('00120060', 
16) PatientIdentityRemoved = int('00120062', 16) DeidentificationMethod = int('00120063', 16) DeidentificationMethodCodeSequence = int('00120064', 16) ClinicalTrialSeriesID = int('00120071', 16) ClinicalTrialSeriesDescription = int('00120072', 16) ClinicalTrialProtocolEthicsCommitteeName = int('00120081', 16) ClinicalTrialProtocolEthicsCommitteeApprovalNumber = int('00120082', 16) ConsentforClinicalTrialUseSequence = int('00120083', 16) DistributionType = int('00120084', 16) ConsentforDistributionFlag = int('00120085', 16) CADFileFormat = int('00140023', 16) ComponentReferenceSystem = int('00140024', 16) ComponentManufacturingProcedure = int('00140025', 16) ComponentManufacturer = int('00140028', 16) MaterialThickness = int('00140030', 16) MaterialPipeDiameter = int('00140032', 16) MaterialIsolationDiameter = int('00140034', 16) MaterialGrade = int('00140042', 16) MaterialPropertiesDescription = int('00140044', 16) MaterialPropertiesFileFormatRetired = int('00140045', 16) MaterialNotes = int('00140046', 16) ComponentShape = int('00140050', 16) CurvatureType = int('00140052', 16) OuterDiameter = int('00140054', 16) InnerDiameter = int('00140056', 16) ComponentWelderIDs = int('00140100', 16) SecondaryApprovalStatus = int('00140101', 16) SecondaryReviewDate = int('00140102', 16) SecondaryReviewTime = int('00140103', 16) SecondaryReviewerName = int('00140104', 16) RepairID = int('00140105', 16) MultipleComponentApprovalSequence = int('00140106', 16) OtherApprovalStatus = int('00140107', 16) OtherSecondaryApprovalStatus = int('00140108', 16) ActualEnvironmentalConditions = int('00141010', 16) ExpiryDate = int('00141020', 16) EnvironmentalConditions = int('00141040', 16) EvaluatorSequence = int('00142002', 16) EvaluatorNumber = int('00142004', 16) EvaluatorName = int('00142006', 16) EvaluationAttempt = int('00142008', 16) IndicationSequence = int('00142012', 16) IndicationNumber = int('00142014', 16) IndicationLabel = int('00142016', 16) IndicationDescription = int('00142018', 16) IndicationType = int('0014201A', 16) IndicationDisposition = int('0014201C', 16) IndicationROISequence = int('0014201E', 16) IndicationPhysicalPropertySequence = int('00142030', 16) PropertyLabel = int('00142032', 16) CoordinateSystemNumberofAxes = int('00142202', 16) CoordinateSystemAxesSequence = int('00142204', 16) CoordinateSystemAxisDescription = int('00142206', 16) CoordinateSystemDataSetMapping = int('00142208', 16) CoordinateSystemAxisNumber = int('0014220A', 16) CoordinateSystemAxisType = int('0014220C', 16) CoordinateSystemAxisUnits = int('0014220E', 16) CoordinateSystemAxisValues = int('00142210', 16) CoordinateSystemTransformSequence = int('00142220', 16) TransformDescription = int('00142222', 16) TransformNumberofAxes = int('00142224', 16) TransformOrderofAxes = int('00142226', 16) TransformedAxisUnits = int('00142228', 16) CoordinateSystemTransformRotationandScaleMatrix = int('0014222A', 16) CoordinateSystemTransformTranslationMatrix = int('0014222C', 16) InternalDetectorFrameTime = int('00143011', 16) NumberofFramesIntegrated = int('00143012', 16) DetectorTemperatureSequence = int('00143020', 16) SensorName = int('00143022', 16) HorizontalOffsetofSensor = int('00143024', 16) VerticalOffsetofSensor = int('00143026', 16) SensorTemperature = int('00143028', 16) DarkCurrentSequence = int('00143040', 16) DarkCurrentCounts = int('00143050', 16) GainCorrectionReferenceSequence = int('00143060', 16) AirCounts = int('00143070', 16) KVUsedinGainCalibration = int('00143071', 16) MAUsedinGainCalibration = 
int('00143072', 16) NumberofFramesUsedforIntegration = int('00143073', 16) FilterMaterialUsedinGainCalibration = int('00143074', 16) FilterThicknessUsedinGainCalibration = int('00143075', 16) DateofGainCalibration = int('00143076', 16) TimeofGainCalibration = int('00143077', 16) BadPixelImage = int('00143080', 16) CalibrationNotes = int('00143099', 16) PulserEquipmentSequence = int('00144002', 16) PulserType = int('00144004', 16) PulserNotes = int('00144006', 16) ReceiverEquipmentSequence = int('00144008', 16) AmplifierType = int('0014400A', 16) ReceiverNotes = int('0014400C', 16) PreAmplifierEquipmentSequence = int('0014400E', 16) PreAmplifierNotes = int('0014400F', 16) TransmitTransducerSequence = int('00144010', 16) ReceiveTransducerSequence = int('00144011', 16) NumberofElements = int('00144012', 16) ElementShape = int('00144013', 16) ElementDimensionA = int('00144014', 16) ElementDimensionB = int('00144015', 16) ElementPitchA = int('00144016', 16) MeasuredBeamDimensionA = int('00144017', 16) MeasuredBeamDimensionB = int('00144018', 16) LocationofMeasuredBeamDiameter = int('00144019', 16) NominalFrequency = int('0014401A', 16) MeasuredCenterFrequency = int('0014401B', 16) MeasuredBandwidth = int('0014401C', 16) ElementPitchB = int('0014401D', 16) PulserSettingsSequence = int('00144020', 16) PulseWidth = int('00144022', 16) ExcitationFrequency = int('00144024', 16) ModulationType = int('00144026', 16) Damping = int('00144028', 16) ReceiverSettingsSequence = int('00144030', 16) AcquiredSoundpathLength = int('00144031', 16) AcquisitionCompressionType = int('00144032', 16) AcquisitionSampleSize = int('00144033', 16) RectifierSmoothing = int('00144034', 16) DACSequence = int('00144035', 16) DACType = int('00144036', 16) DACGainPoints = int('00144038', 16) DACTimePoints = int('0014403A', 16) DACAmplitude = int('0014403C', 16) PreAmplifierSettingsSequence = int('00144040', 16) TransmitTransducerSettingsSequence = int('00144050', 16) ReceiveTransducerSettingsSequence = int('00144051', 16) IncidentAngle = int('00144052', 16) CouplingTechnique = int('00144054', 16) CouplingMedium = int('00144056', 16) CouplingVelocity = int('00144057', 16) ProbeCenterLocationX = int('00144058', 16) ProbeCenterLocationZ = int('00144059', 16) SoundPathLength = int('0014405A', 16) DelayLawIdentifier = int('0014405C', 16) GateSettingsSequence = int('00144060', 16) GateThreshold = int('00144062', 16) VelocityofSound = int('00144064', 16) CalibrationSettingsSequence = int('00144070', 16) CalibrationProcedure = int('00144072', 16) ProcedureVersion = int('00144074', 16) ProcedureCreationDate = int('00144076', 16) ProcedureExpirationDate = int('00144078', 16) ProcedureLastModifiedDate = int('0014407A', 16) CalibrationTime = int('0014407C', 16) CalibrationDate = int('0014407E', 16) ProbeDriveEquipmentSequence = int('00144080', 16) DriveType = int('00144081', 16) ProbeDriveNotes = int('00144082', 16) DriveProbeSequence = int('00144083', 16) ProbeInductance = int('00144084', 16) ProbeResistance = int('00144085', 16) ReceiveProbeSequence = int('00144086', 16) ProbeDriveSettingsSequence = int('00144087', 16) BridgeResistors = int('00144088', 16) ProbeOrientationAngle = int('00144089', 16) UserSelectedGainY = int('0014408B', 16) UserSelectedPhase = int('0014408C', 16) UserSelectedOffsetX = int('0014408D', 16) UserSelectedOffsetY = int('0014408E', 16) ChannelSettingsSequence = int('00144091', 16) ChannelThreshold = int('00144092', 16) ScannerSettingsSequence = int('0014409A', 16) ScanProcedure = int('0014409B', 16) 
TranslationRateX = int('0014409C', 16) TranslationRateY = int('0014409D', 16) ChannelOverlap = int('0014409F', 16) ImageQualityIndicatorType = int('001440A0', 16) ImageQualityIndicatorMaterial
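# Each of these constants packs a DICOM (group, element) tag pair into a
# single 32-bit integer, with the group in the high 16 bits. A minimal
# sketch of going back and forth (PatientsName is one of the constants
# defined above):
PatientsName = int('00100010', 16)

# split a packed tag back into its (group, element) pair
group, element = PatientsName >> 16, PatientsName & 0xFFFF
print('(%04X,%04X)' % (group, element))  # -> (0010,0010)

# and repack
assert (group << 16) | element == PatientsName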
# -*- coding: utf-8 -*-

from escher.quick_server import serve_and_open
from escher import urls

import os
from os.path import dirname, abspath, join, isfile, isdir
from warnings import warn
from urllib2 import urlopen, HTTPError, URLError
import json
import shutil
import appdirs
import re
from jinja2 import Environment, PackageLoader, Template
import codecs
import random
import string

# set up jinja2 template location
env = Environment(loader=PackageLoader('escher', 'templates'))

def get_cache_dir(name=None):
    """Get the cache dir as a string.

    name: an optional subdirectory within the cache

    """
    cache_dir = join(appdirs.user_cache_dir('escher', appauthor="<NAME>"))
    if name is not None:
        cache_dir = join(cache_dir, name)
    try:
        os.makedirs(cache_dir)
    except OSError:
        pass
    return cache_dir

def clear_cache():
    """Empty the contents of the cache directory."""
    cache_dir = get_cache_dir()
    for root, dirs, files in os.walk(cache_dir):
        for f in files:
            os.unlink(join(root, f))
        for d in dirs:
            shutil.rmtree(join(root, d))

def list_cached_maps():
    """Return a list of all cached maps."""
    try:
        return [x.replace('.json', '')
                for x in os.listdir(get_cache_dir(name='maps'))]
    except OSError:
        print 'No cached maps'
        return None

def list_cached_models():
    """Return a list of all cached models."""
    try:
        return [x.replace('.json', '')
                for x in os.listdir(get_cache_dir(name='models'))]
    except OSError:
        print 'No cached models'
        return None

def get_an_id():
    return unicode(''.join(random.choice(string.ascii_lowercase)
                           for _ in range(10)))

def load_resource(resource, name, safe=False):
    """Load a resource that could be a file, URL, or json string."""
    # if it's a url, download it
    if resource.startswith('http://') or resource.startswith('https://'):
        try:
            download = urlopen(resource)
        except URLError as err:
            raise err
        else:
            return download.read()
    # if it's a filepath, load it
    if os.path.exists(resource):
        if (safe):
            raise Exception('Cannot load resource from file with safe mode enabled.')
        try:
            with open(resource, 'r') as f:
                loaded_resource = f.read()
            _ = json.loads(loaded_resource)
        except ValueError as err:
            raise ValueError('%s not a valid json file' % name)
        else:
            return loaded_resource
    # finally, try to validate it as a raw json string
    try:
        _ = json.loads(resource)
    except ValueError as err:
        raise ValueError('Could not load %s. Not valid json, url, or filepath' % name)
    else:
        return resource

class Builder(object):
    """Viewable metabolic map.

    This map will also show metabolic fluxes passed in during construction.
    It can be viewed as a standalone html page inside a browser. Alternately,
    the representation inside an IPython notebook will also display the map.

    Maps are stored as json files in a cache directory. Maps that are not
    found locally will be downloaded from a map repository if available.

    Arguments
    ---------

    map_name: a string specifying a map to be downloaded from the Escher web
    server.

    map_json: a json string, or a file path to a json file, or a URL
    specifying a json file to be downloaded.

    model_name: a string specifying a model to be downloaded from the Escher
    web server.

    model_json: a json string, or a file path to a json file, or a URL
    specifying a json file to be downloaded.

    reaction_data: a dictionary with keys that correspond to reaction ids and
    values that will be mapped to reaction arrows and labels.

    metabolite_data: a dictionary with keys that correspond to metabolite ids
    and values that will be mapped to metabolite nodes and labels.
local_host: a hostname that will be used for any local files in dev mode. Defaults to the current host. safe: if True, then loading files from the filesytem is not allowed. This is to ensure the safety of using Builder with a web server. """ def __init__(self, map_name=None, map_json=None, model_name=None, model_json=None, reaction_data=None, metabolite_data=None, local_host='', safe=False): self.safe = safe # load the map self.map_name = map_name self.map_json = map_json self.loaded_map_json = None if map_name and map_json: warn('map_json overrides map_name') self.load_map() # load the model self.model_name = model_name self.model_json = model_json self.loaded_model_json = None if model_name and model_json: warn('model_json overrides model_name') self.load_model() # set the args self.reaction_data = reaction_data self.metabolite_data = metabolite_data self.local_host = local_host.strip(os.sep) # make the unique id self.generate_id() def generate_id(self): self.the_id = get_an_id() def load_model(self): """Load the model from input model_json using load_resource, or, secondarily, from model_name. """ model_json = self.model_json if model_json is not None: self.loaded_model_json = load_resource(self.model_json, 'model_json', safe=self.safe) elif self.model_name is not None: # get the name model_name = self.model_name model_name = model_name.replace(".json", "") # if the file is not present attempt to download cache_dir = get_cache_dir(name='models') model_filename = join(cache_dir, model_name + ".json") if not isfile(model_filename): model_not_cached = 'Model "%s" not in cache. Attempting download from %s' % \ (model_name, urls.escher_home) warn(model_not_cached) try: url = urls.model_download + model_name + ".json" download = urlopen(url) with open(model_filename, "w") as outfile: outfile.write(download.read()) except HTTPError: raise ValueError("No model named %s found in cache or at %s" % \ (model_name, url)) with open(model_filename) as f: self.loaded_model_json = f.read() def load_map(self): """Load the map from input map_json using load_resource, or, secondarily, from map_name. """ map_json = self.map_json if map_json is not None: self.loaded_map_json = load_resource(self.map_json, 'map_json', safe=self.safe) elif self.map_name is not None: # get the name map_name = self.map_name map_name = map_name.replace(".json", "") # if the file is not present attempt to download cache_dir = get_cache_dir(name='maps') map_filename = join(cache_dir, map_name + ".json") if not isfile(map_filename): map_not_cached = 'Map "%s" not in cache. 
Attempting download from %s' % \ (map_name, urls.escher_home) warn(map_not_cached) try: url = urls.map_download + map_name + ".json" download = urlopen(url) with open(map_filename, "w") as outfile: outfile.write(download.read()) except HTTPError: raise ValueError("No map named %s found in cache or at %s" % \ (map_name, url)) with open(map_filename) as f: self.loaded_map_json = f.read() def _embedded_css(self, is_local): loc = (join(self.local_host, urls.builder_embed_css_local) if is_local else urls.builder_embed_css) download = urlopen(urls.builder_embed_css) return unicode(download.read().replace('\n', ' ')) def _initialize_javascript(self, is_local): javascript = (u"var map_data_{the_id} = {map_data};" u"var cobra_model_{the_id} = {cobra_model};" u"var reaction_data_{the_id} = {reaction_data};" u"var metabolite_data_{the_id} = {metabolite_data};" u"var css_string_{the_id} = '{style}';").format( the_id=self.the_id, map_data=(self.loaded_map_json if self.loaded_map_json else u'null'), cobra_model=(self.loaded_model_json if self.loaded_model_json else u'null'), reaction_data=(json.dumps(self.reaction_data) if self.reaction_data else u'null'), metabolite_data=(json.dumps(self.metabolite_data) if self.metabolite_data else u'null'), style=self._embedded_css(is_local)) return javascript def _draw_js(self, the_id, enable_editing, menu, enable_keys, dev, fill_screen, scroll_behavior): draw = (u"Builder({{ selection: d3.select('#{the_id}')," u"enable_editing: {enable_editing}," u"menu: {menu}," u"enable_keys: {enable_keys}," u"scroll_behavior: {scroll_behavior}," u"fill_screen: {fill_screen}," u"map: map_data_{the_id}," u"cobra_model: cobra_model_{the_id}," u"reaction_data: reaction_data_{the_id}," u"metabolite_data: metabolite_data_{the_id}," u"css: css_string_{the_id} }});").format( the_id=the_id, enable_editing=json.dumps(enable_editing), menu=json.dumps(menu), enable_keys=json.dumps(enable_keys), scroll_behavior=json.dumps(scroll_behavior), fill_screen=json.dumps(fill_screen)) if not dev: draw = u'escher.%s' % draw return draw def _get_html(self, js_source='web', menu='none', scroll_behavior='pan', html_wrapper=False, enable_editing=False, enable_keys=False, minified_js=True, fill_screen=False, height='800px'): """Generate the Escher HTML. Arguments -------- js_source: Can be one of the following: 'web' - (Default) use js files from zakandrewking.github.io/escher. 'local' - use compiled js files in the local escher installation. Works offline. 'dev' - use the local, uncompiled development files. Works offline. menu: Menu bar options include: 'none' - (Default) No menu or buttons. 'zoom' - Just zoom buttons (does not require bootstrap). 'all' - Menu and button bar (requires bootstrap). scroll_behavior: Scroll behavior options: 'pan' - (Default) Pan the map. 'zoom' - Zoom the map. 'none' - No scroll events. minified_js: If True, use the minified version of js files. If js_source is 'dev', then this option is ignored. html_wrapper: If True, return a standalone html file. enable_editing: Enable the editing modes (build, rotate, etc.). enable_keys: Enable keyboard shortcuts. height: The height of the HTML container. 
""" if js_source not in ['web', 'local', 'dev']: raise Exception('Bad value for js_source: %s' % js_source) if menu not in ['none', 'zoom', 'all']: raise Exception('Bad value for menu: %s' % menu) if scroll_behavior not in ['pan', 'zoom', 'none']: raise Exception('Bad value for scroll_behavior: %s' % scroll_behavior) content = env.get_template('content.html') # if height is not a string if type(height) is int: height = u"%dpx" % height elif type(height) is float: height = u"%fpx" % height elif type(height) is str: height = unicode(height) # set the proper urls is_local = js_source=='local' or js_source=='dev' is_dev = js_source=='dev' d3_url = (join(self.local_host, urls.d3_local) if is_local else urls.d3) escher_url = ("" if js_source=='dev' else (join(self.local_host, urls.escher_min_local) if is_local and minified_js else (join(self.local_host, urls.escher_local) if is_local else (urls.escher_min if minified_js else urls.escher)))) jquery_url = ("" if not menu=='all' else (join(self.local_host, urls.jquery_local) if is_local else urls.jquery)) boot_css_url = ("" if not menu=='all' else (join(self.local_host, urls.boot_css_local) if is_local else urls.boot_css)) boot_js_url
# encoding: utf-8 """ .. codeauthor:: <NAME> <<EMAIL>> """ from __future__ import unicode_literals from copy import deepcopy from datetime import date, datetime, timedelta import pytest import pytz from datetimerange import DateTimeRange from dateutil.parser import parse from dateutil.relativedelta import relativedelta TIMEZONE = "+0900" START_DATETIME_TEXT = "2015-03-22T10:00:00" + TIMEZONE END_DATETIME_TEXT = "2015-03-22T10:10:00" + TIMEZONE TEST_START_DATETIME = parse(START_DATETIME_TEXT) TEST_END_DATETIME = parse(END_DATETIME_TEXT) ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S%z" def setup_module(module): import locale locale.setlocale(locale.LC_ALL, ("C", "ascii")) @pytest.fixture def datetimerange_normal(): value = DateTimeRange(TEST_START_DATETIME, TEST_END_DATETIME) value.start_time_format = ISO_TIME_FORMAT value.end_time_format = ISO_TIME_FORMAT return value @pytest.fixture def datetimerange_inversion(): value = DateTimeRange(TEST_END_DATETIME, TEST_START_DATETIME) value.start_time_format = ISO_TIME_FORMAT value.end_time_format = ISO_TIME_FORMAT return value @pytest.fixture def datetimerange_null(): value = DateTimeRange(None, None) value.time_format = None value.end_time_format = None return value @pytest.fixture def datetimerange_null_start(): value = DateTimeRange(None, TEST_END_DATETIME) value.time_format = None value.end_time_format = ISO_TIME_FORMAT return value class Test_DateTimeRange_repr(object): @pytest.mark.parametrize( ["start", "start_format", "end", "end_format", "separator", "is_output_elapse", "expected"], [ [ TEST_START_DATETIME, ISO_TIME_FORMAT, TEST_END_DATETIME, ISO_TIME_FORMAT, " - ", False, "2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900", ], [ "2015-03-22T09:00:00+0900", ISO_TIME_FORMAT, "2015-03-22T10:10:00+0900", ISO_TIME_FORMAT, " - ", False, "2015-03-22T09:00:00+0900 - 2015-03-22T10:10:00+0900", ], [ "2015-03-22T09:00:00", ISO_TIME_FORMAT, "2015-03-22T10:10:00", ISO_TIME_FORMAT, " - ", False, "2015-03-22T09:00:00 - 2015-03-22T10:10:00", ], [ TEST_START_DATETIME, ISO_TIME_FORMAT, TEST_END_DATETIME, ISO_TIME_FORMAT, " - ", True, "2015-03-22T10:00:00+0900 - 2015-03-22T10:10:00+0900 (0:10:00)", ], [ TEST_END_DATETIME, ISO_TIME_FORMAT, TEST_START_DATETIME, ISO_TIME_FORMAT, " - ", True, "2015-03-22T10:10:00+0900 - 2015-03-22T10:00:00+0900 (-1 day, 23:50:00)", ], [ TEST_START_DATETIME, "%Y%m%d%H%M%S", TEST_END_DATETIME, "%Y/%m/%d %H:%M:%S%z", " to ", False, "20150322100000 to 2015/03/22 10:10:00+0900", ], [ None, ISO_TIME_FORMAT, TEST_END_DATETIME, ISO_TIME_FORMAT, " - ", False, "NaT - 2015-03-22T10:10:00+0900", ], [ TEST_START_DATETIME, ISO_TIME_FORMAT, None, ISO_TIME_FORMAT, " - ", False, "2015-03-22T10:00:00+0900 - NaT", ], [ "2015-03-22", "%Y-%m-%d", "2015-04-22", "%Y-%m-%d", " - ", False, "2015-03-22 - 2015-04-22", ], [ date(2015, 3, 22), "%Y-%m-%d", date(2015, 4, 22), "%Y-%m-%d", " - ", False, "2015-03-22 - 2015-04-22", ], ["01:23:45", "%H:%M:%S", "11:23:45", "%H:%M:%S", " - ", False, "01:23:45 - 11:23:45"], [None, ISO_TIME_FORMAT, None, ISO_TIME_FORMAT, " - ", False, "NaT - NaT"], ], ) def test_normal( self, start, start_format, end, end_format, separator, is_output_elapse, expected ): dtr = DateTimeRange(start, end, start_format, end_format) dtr.separator = separator dtr.is_output_elapse = is_output_elapse assert str(dtr) == expected @pytest.mark.parametrize( ["start", "start_format", "end", "end_format", "expected"], [ [ "2015-03-08T00:00:00-0400", ISO_TIME_FORMAT, "2015-03-08T12:00:00-0400", ISO_TIME_FORMAT, "2015-03-08T00:00:00-0400 - 
2015-03-08T12:00:00-0300", ], [ "2015-11-01T00:00:00-0400", ISO_TIME_FORMAT, "2015-11-01T12:00:00-0400", ISO_TIME_FORMAT, "2015-11-01T00:00:00-0300 - 2015-11-01T12:00:00-0400", ], ], ) def test_daylight_saving_time(self, start, start_format, end, end_format, expected): dtr = DateTimeRange(start, end, start_format, end_format) assert str(dtr) == expected @pytest.mark.parametrize( ["start", "start_format", "end", "end_format", "separator", "is_output_elapse", "expected"], [ [ TEST_START_DATETIME, None, TEST_END_DATETIME, ISO_TIME_FORMAT, " - ", False, TypeError, ], [ TEST_START_DATETIME, ISO_TIME_FORMAT, TEST_END_DATETIME, None, " - ", False, TypeError, ], [ TEST_START_DATETIME, ISO_TIME_FORMAT, TEST_END_DATETIME, ISO_TIME_FORMAT, None, False, AttributeError, ], ], ) def test_exception( self, start, start_format, end, end_format, separator, is_output_elapse, expected ): dtr = DateTimeRange(start, end, start_format, end_format) dtr.separator = separator dtr.is_output_elapse = is_output_elapse with pytest.raises(expected): str(dtr) class Test_DateTimeRange_eq(object): @pytest.mark.parametrize( ["lhs", "rhs", "expected"], [ [DateTimeRange(None, None), DateTimeRange(None, None), True], [ DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), True, ], [ DateTimeRange("2015-03-22T10:00:00", "2015-03-22T10:10:00"), DateTimeRange("2015-03-22T10:00:00", "2015-03-22T10:10:00"), True, ], [ DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:20:00+0900"), False, ], [ DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), DateTimeRange("2015-03-22T10:02:00+0900", "2015-03-22T10:10:00+0900"), False, ], [ DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), DateTimeRange("2015-03-22T11:00:00+0900", "2015-03-22T12:10:00+0900"), False, ], [DateTimeRange(TEST_START_DATETIME, TEST_END_DATETIME), None, False], [None, DateTimeRange(TEST_START_DATETIME, TEST_END_DATETIME), False], ], ) def test_normal(self, lhs, rhs, expected): assert (lhs == rhs) == expected class Test_DateTimeRange_neq(object): @pytest.mark.parametrize( ["lhs", "rhs", "expected"], [ [DateTimeRange(None, None), DateTimeRange(None, None), False], [ DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), False, ], [ DateTimeRange("2015-03-22T10:00:00", "2015-03-22T10:10:00"), DateTimeRange("2015-03-22T10:00:00", "2015-03-22T10:10:00"), False, ], [ DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:20:00+0900"), True, ], [ DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), DateTimeRange("2015-03-22T10:02:00+0900", "2015-03-22T10:10:00+0900"), True, ], [ DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), DateTimeRange("2015-03-22T11:00:00+0900", "2015-03-22T12:10:00+0900"), True, ], [DateTimeRange(TEST_START_DATETIME, TEST_END_DATETIME), None, True], [None, DateTimeRange(TEST_START_DATETIME, TEST_END_DATETIME), True], ], ) def test_normal(self, lhs, rhs, expected): assert (lhs != rhs) == expected class Test_DateTimeRange_add(object): @pytest.mark.parametrize( ["value", "add_value", "expected"], [ [ DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900"), timedelta(seconds=10 * 60), DateTimeRange("2015-03-22T10:10:00+0900", 
"2015-03-22T10:20:00+0900"), ], [ DateTimeRange("2015-03-22T10:00:00", "2015-03-22T10:10:00"), timedelta(seconds=-10 * 60), DateTimeRange("2015-03-22T09:50:00", "2015-03-22T10:00:00"), ], ], ) def test_normal(self, value, add_value, expected): new_datetimerange = value + add_value assert new_datetimerange == expected @pytest.mark.parametrize( ["value", "expected"], [["2015-03-22T10:10:00+0900", TypeError], [1, TypeError], [None, TypeError]], ) def test_exception(self, datetimerange_normal, value, expected): with pytest.raises(TypeError): datetimerange_normal + value def test_null(self, datetimerange_null): with pytest.raises(TypeError): datetimerange_null + timedelta(seconds=10 * 60) class Test_DateTimeRange_iadd(object): def test_normal(self): value = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900") expected = DateTimeRange("2015-03-22T10:10:00+0900", "2015-03-22T10:20:00+0900") value += timedelta(seconds=10 * 60) assert value == expected @pytest.mark.parametrize( ["value", "expected"], [["2015-03-22T10:10:00+0900", TypeError], [1, TypeError], [None, TypeError]], ) def test_exception(self, datetimerange_normal, value, expected): with pytest.raises(TypeError): datetimerange_normal += value def test_null(self, datetimerange_null): with pytest.raises(TypeError): datetimerange_null += timedelta(seconds=10 * 60) class Test_DateTimeRange_sub(object): def test_normal(self): value = DateTimeRange("2015-03-22T10:10:00+0900", "2015-03-22T10:20:00+0900") expected = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900") new_datetimerange = value - timedelta(seconds=10 * 60) assert new_datetimerange == expected @pytest.mark.parametrize( ["value", "expected"], [["2015-03-22T10:10:00+0900", TypeError], [1, TypeError], [None, TypeError]], ) def test_exception(self, datetimerange_normal, value, expected): with pytest.raises(TypeError): datetimerange_normal - value def test_null(self, datetimerange_null): with pytest.raises(TypeError): datetimerange_null - timedelta(seconds=10 * 60) class Test_DateTimeRange_isub(object): def test_normal(self): value = DateTimeRange("2015-03-22T10:10:00+0900", "2015-03-22T10:20:00+0900") expected = DateTimeRange("2015-03-22T10:00:00+0900", "2015-03-22T10:10:00+0900") value -= timedelta(seconds=10 * 60) assert value == expected @pytest.mark.parametrize( ["value", "expected"], [["2015-03-22T10:10:00+0900", TypeError], [1, TypeError], [None, TypeError]], ) def test_exception(self, datetimerange_normal, value, expected): with pytest.raises(TypeError): datetimerange_normal -= value def test_null(self, datetimerange_null): with pytest.raises(TypeError): datetimerange_null -= timedelta(seconds=10 * 60) class Test_DateTimeRange_contains(object): @pytest.mark.parametrize( ["value", "expected"], [ [START_DATETIME_TEXT, True], [END_DATETIME_TEXT, True], [TEST_START_DATETIME, True], [TEST_END_DATETIME, True], [ DateTimeRange("2015-03-22 10:05:00" + TIMEZONE, "2015-03-22 10:06:00" + TIMEZONE), True, ], [ DateTimeRange("2015-03-22 10:10:01" + TIMEZONE, "2015-03-22 10:11:01" + TIMEZONE), False, ], ["2015-03-22 09:59:59" + TIMEZONE, False], ["2015-03-22 10:10:01" + TIMEZONE, False], ], ) def test_normal(self, datetimerange_normal, value, expected): assert (value in datetimerange_normal) == expected @pytest.mark.parametrize( ["value", "expected"], [[None, TypeError], [False, TypeError], [20140513221937, TypeError]] ) def test_exception(self, datetimerange_normal, value, expected): with pytest.raises(expected): value in datetimerange_normal 
@pytest.mark.parametrize( ["value", "expected"], [[TEST_START_DATETIME, TypeError], ["aaa", TypeError], [None, TypeError]], ) def test_null_start(self, datetimerange_null_start, value, expected): with pytest.raises(expected): value in datetimerange_null_start class Test_DateTimeRange_timedelta(object): def test_normal(self, datetimerange_normal): assert datetimerange_normal.timedelta == timedelta(seconds=10 * 60) @pytest.mark.parametrize( ["start", "end", "expected"], [ [ "2015-03-08T00:00:00-0400", "2015-03-08T12:00:00-0400", timedelta(0, 39600), # 11 hours ], [ "2015-11-01T00:00:00-0400", "2015-11-01T12:00:00-0400", timedelta(0, 46800), # 13 hours ], ], ) def test_daylight_saving_time(self, start, end, expected): dtr = DateTimeRange(start, end) assert dtr.timedelta == expected def test_inversion(self, datetimerange_inversion): assert datetimerange_inversion.timedelta == timedelta(-1, 85800) def test_null(self, datetimerange_null): with pytest.raises(TypeError): datetimerange_null.timedelta def test_exception(self, datetimerange_null_start): with pytest.raises(TypeError): datetimerange_null_start.timedelta class Test_DateTimeRange_is_set(object): @pytest.mark.parametrize( ["value", "expected"], [ [DateTimeRange(TEST_START_DATETIME, TEST_END_DATETIME), True], [DateTimeRange(TEST_END_DATETIME, TEST_START_DATETIME), True], [DateTimeRange(TEST_START_DATETIME, None), False], [DateTimeRange(None, TEST_START_DATETIME), False], [DateTimeRange(None, None), False], ], ) def test_normal(self, value, expected): assert value.is_set() == expected class Test_DateTimeRange_validate_time_inversion(object): @pytest.mark.parametrize( ["value"], [ [DateTimeRange(TEST_START_DATETIME, TEST_END_DATETIME)], [DateTimeRange(TEST_START_DATETIME, TEST_START_DATETIME)], ], ) def test_normal(self, value): value.validate_time_inversion() def test_inversion(self, datetimerange_inversion): with pytest.raises(ValueError): datetimerange_inversion.validate_time_inversion() @pytest.mark.parametrize( ["value"], [ [DateTimeRange(None, None)], [DateTimeRange(None, TEST_END_DATETIME)], [DateTimeRange(TEST_START_DATETIME, None)], ], ) def test_exception(self, value): with pytest.raises(TypeError): value.validate_time_inversion() class Test_DateTimeRange_is_valid_timerange(object): @pytest.mark.parametrize( ["value", "expected"], [ [DateTimeRange(TEST_START_DATETIME, TEST_END_DATETIME), True], [DateTimeRange(TEST_END_DATETIME, TEST_START_DATETIME), False], [DateTimeRange(TEST_START_DATETIME, None), False], [DateTimeRange(None, TEST_START_DATETIME), False], [DateTimeRange(None, None), False], ], ) def test_normal(self, value, expected): assert value.is_valid_timerange() == expected class Test_DateTimeRange_range(object): @pytest.mark.parametrize( ["value", "step", "expected"], [ [ DateTimeRange(datetime(2015, 3, 22, 0, 0, 0), datetime(2015, 3, 22, 0, 1, 0)), timedelta(seconds=20), [ datetime(2015, 3, 22, 0, 0, 0), datetime(2015, 3, 22, 0, 0, 20), datetime(2015, 3, 22, 0, 0, 40), datetime(2015, 3, 22, 0, 1, 00), ], ], [ DateTimeRange(datetime(2015, 3, 22, 0, 0, 0), datetime(2015, 3, 23, 0, 0, 0)), relativedelta(hours=+6), [ datetime(2015, 3, 22, 0, 0, 0), datetime(2015, 3, 22, 6, 0, 0), datetime(2015, 3, 22, 12, 0, 0), datetime(2015, 3, 22, 18, 0, 0), datetime(2015, 3, 23, 0, 0, 0), ], ], [ DateTimeRange(datetime(2015, 3, 22, 0, 0, 0), datetime(2015, 3, 23, 0, 0, 0)), relativedelta(months=+6), [datetime(2015, 3, 22, 0, 0, 0)], ], [ DateTimeRange("2015-01-01T00:00:00+0900", "2016-01-01T00:00:00+0900"), relativedelta(months=+4), [ 
parse("2015-01-01T00:00:00+0900"), parse("2015-05-01T00:00:00+0900"), parse("2015-09-01T00:00:00+0900"), parse("2016-01-01T00:00:00+0900"), ], ], [ DateTimeRange(datetime(2015, 3, 23, 0, 0, 0), datetime(2015, 3, 22, 0, 0, 0)), relativedelta(hours=-6), [ datetime(2015, 3, 23, 0, 0, 0), datetime(2015, 3, 22, 18, 0, 0), datetime(2015, 3, 22, 12, 0, 0), datetime(2015, 3, 22, 6, 0, 0), datetime(2015, 3, 22, 0, 0, 0), ], ], [ DateTimeRange(date(2015, 3, 23), date(2015, 3, 26)), relativedelta(days=+1), [ datetime(2015, 3, 23, 0, 0, 0), datetime(2015, 3, 24, 0, 0, 0), datetime(2015, 3, 25, 0, 0, 0), datetime(2015, 3, 26, 0, 0, 0), ], ], ], ) def test_normal(self, value, step, expected): for value_item, expected_item in zip(value.range(step), expected): assert value_item == expected_item @pytest.mark.parametrize( ["value", "step", "expected"], [ [ DateTimeRange(datetime(2015, 3, 22, 0, 0, 0), datetime(2015, 3, 22, 0, 1, 0)), relativedelta(seconds=-60), ValueError, ],
in X.residues[res2]: if X.dist(atom,atom2)<6.0: if not res2 in excludes[tg]: excludes[tg].append(res2) break for tg in sorted(excludes.keys()): print tg print excludes[tg] print '-------' return excludes # # -------- # def calculate_average(self,data): """Calculate the average ghost observed and the standard deviation""" for datatype in data.keys(): for diel in data[datatype].keys(): for TG in data[datatype][diel].keys(): for residue in data[datatype][diel][TG].keys(): for nucleus in data[datatype][diel][TG][residue].keys(): try: values=data[datatype][diel][TG][residue][nucleus] except KeyError: print 'Skipping %d for %s' %(diel) continue import stats avg=stats.average(values) SD=stats.stddev(values) data[datatype][diel][TG][residue][nucleus]=[avg,SD] return data # # ----- # def load_all_calculated(self,exp_ghosts): """Load all current predictions of ghosts""" predictions=[['Xray_avg.pickle','Xray'],['MD_calcghosts_nowat.pickle','MD_nowat'],['average_calcghosts_predrag_all.pickle','MD'],['Xrays/all_ghosts_2LZT_H.pdb.pickle','2LZT']] # ['average_calcghosts_predragGB.pickle','GB'], alldata={} for filename,ensemble_name in predictions: import os filename=os.path.join(os.getcwd(),self.options.calcghostdir,filename) calc_ghosts=self.load_calcghosts(self.options,filename) print print '=================%s====================' %ensemble_name print big_satisfied=self.find_opteps(exp_ghosts,calc_ghosts) alldata[ensemble_name]=self.restraint_plot(big_satisfied) # # Do the full plot # import pylab colours=['k','r','b','g','y','c','m','grey','orange','pink'] pylab.clf() count=0 sumsum=[] for ensname in sorted(alldata.keys()): for xs,present,absent,method in alldata[ensname]: text='%s:%s' %(ensname,method) col=colours[count] marker='' if ensname=='GB': marker='o' pylab.plot(xs,present,'-',marker=marker,color=col,label=text,linewidth=3) pylab.plot(xs,absent,'--',marker=marker,color=col,linewidth=3) count=count+1 if count==len(colours): count=0 # # Find the best eps # ecount=0 maxval=-999.9 besteps=0.0 for val in present: if val>maxval: maxval=val besteps=ecount ecount=ecount+1 print 'Best eps for %s present: %5.1f with %5.1f %% satisfied, at this eps absent is %5.1f' %(text,xs[besteps],maxval,absent[besteps]) sumsum.append([maxval+absent[besteps],xs[besteps],maxval,absent[besteps],text]) sumsum.sort() print 'Sum, Eps, %present, %absent, method' for sum,eps,present,absent,text in sumsum: print '%5.1f, %5.1f, %5.1f, %5.1f %s' %(sum,eps,present,absent,text) # # Finish the plot # pylab.ylim((0.0,100.0)) pylab.xlim((0.0,30.0)) pylab.legend() pylab.title('Satisfied restraints for %s' %self.options.atom) pylab.xlabel('Dielectric constant') pylab.ylabel('% restraints satisfied') pylab.savefig('bigplot.png',dpi=300) pylab.show() return # # ----- # def load_calcghosts(self,options,loadfile=None): """Load the calculated ghosts from a single file or a directory""" if options.loaddir: # Load all files in the directory specified import os if os.path.isdir(options.loaddir): files=os.listdir(options.loaddir) okfiles=[] for fn in files: txt='_H.pdb.pickle' size=len(txt) if fn[-size:]!=txt: continue okfiles.append(fn) print 'Found %3d files with calculated ghosts' %len(okfiles) C=False count=1 for fn in sorted(okfiles): print 'Reading # %3d with name: %s' %(count,fn) count=count+1 realfile=os.path.join(options.loaddir,fn) fd=open(realfile) import pickle data=pickle.load(fd) fd.close() if not C: C=data.copy() # for datatype in data.keys(): for diel in data[datatype].keys(): if not C[datatype].has_key(diel): 
C[datatype][diel]={} for TG in data[datatype][diel].keys(): if not C[datatype][diel].has_key(TG): C[datatype][diel][TG]={} for residue in data[datatype][diel][TG].keys(): if not C[datatype][diel][TG].has_key(residue): C[datatype][diel][TG][residue]={} for nucleus in data[datatype][diel][TG][residue].keys(): if not C[datatype][diel][TG][residue].has_key(nucleus): C[datatype][diel][TG][residue][nucleus]=[] # value=data[datatype][diel][TG][residue][nucleus] if not type(C[datatype][diel][TG][residue][nucleus]) is type([]): C[datatype][diel][TG][residue][nucleus]=[] C[datatype][diel][TG][residue][nucleus].append(value) # # Get the averages and standard deviations # calc_ghosts=self.calculate_average(C) avgfile=options.avgfile print 'Saving average ghosts to %s' %avgfile fd=open(avgfile,'w') import pickle pickle.dump(calc_ghosts,fd) fd.close() else: raise Exception('Not a directory: %s' %options.loaddir) else: # # Load just a single file # filename=options.calcghosts if loadfile: filename=loadfile fd=open(filename) import pickle print 'Reading calculated ghosts from %s' %filename calc_ghosts=pickle.load(fd) fd.close() return calc_ghosts # # ------- # def stats_forpaper(self,exp_ghosts): """Get the stats for the paper""" # Get the excludes excludes=self.find_excludes(options,exp_ghosts) # Loop over all ghosts for atom in ['N','H','HA']: values=[] absent=[] for tg in sorted(exp_ghosts.keys()): for residue in sorted(exp_ghosts[tg].keys()): if excludes.has_key(tg): if residue in excludes[tg]: continue # # Get the value # if exp_ghosts[tg][residue].has_key(atom): exp_value=exp_ghosts[tg][residue][atom] exp_error=errors[atom] if exp_ghosts[tg][residue].has_key(atom+'_error'): exp_error=exp_ghosts[tg][residue][atom+'_error'] # # Deal with ranges # if exp_value[0]=='q' and options.use_questionable: exp_value=exp_value[1:] # # Deal with ranges - we need to do this better # if len(exp_value.split(';'))==2: s=[] for val in exp_value.split(';'): if val[0] in ['<','>']: val=val[1:] s.append(float(val)) exp_error=abs(s[0]-s[1]) exp_value=float(sum(s))/len(s) if exp_value=='absent': absent.append(residue) else: values.append(abs(float(exp_value))) # # Calculate average # import stats avg=0.0 SD=0.0 if len(values)>0: avg=stats.average(values) SD=stats.stddev(values) print '%2s ghost titrations: %3d, avg: %5.2f (%5.2f), %3d absent ghost titrations' %(atom,len(values),avg,SD,len(absent)) return # # ------ # def find_opteps(self,exp_ghosts,calc_ghosts): """Find the optimal eps value for each residue and overall""" # # Instantiate the dummy scoring function # S=dummy_epsmap() # # Find the ghosts to exclude # excludes=self.find_excludes(options,exp_ghosts) # # Initialize arrays # dep={} bestepses={} bigdata={} big_satisfied={} # # Start looping # for method in calc_ghosts.keys(): # # Smoothed PBE calculations gave no improvement # if method=='sPBE': continue # # Method is the calculational method # dep[method]={} bestepses[method]={} big_satisfied[method]={} # bigdata[method]={} # for tg in sorted(exp_ghosts.keys()): tgcount=0 wrong_sign=[] # # First find best eps # scores=[] data={} bigdata[method][tg]={} big_satisfied[method][tg]={} for eps in sorted(calc_ghosts[method]): # # Get the RMSD for this eps # xs=[] ys=[] experrors=[] calcerrors=[] residues=[] big_satisfied[method][tg][eps]={} # # Loop over all residues # atom=options.atom if not calc_ghosts[method][eps].has_key(tg): #print 'no TG in calc',tg continue #print 'CHECK',tg,eps,method for residue in sorted(exp_ghosts[tg].keys()): # # Catch for uncalculated 
ghosts from MD sims # if not calc_ghosts[method][eps][tg].has_key(residue): #print 'missing residue',residue continue #print calc_ghosts[method][eps][tg].keys() # # # if excludes.has_key(tg): if residue in excludes[tg]: continue if exp_ghosts[tg][residue].has_key(atom): exp_value=exp_ghosts[tg][residue][atom] exp_error=errors[atom] if exp_ghosts[tg][residue].has_key(atom+'_error'): exp_error=exp_ghosts[tg][residue][atom+'_error'] # # Get the calculated value - and the error if specified # if not calc_ghosts[method][eps][tg][residue].has_key(atom): #print 'No calculated result for %s %d %s %s %s' %(method,eps,tg,residue,atom) continue #else: # print 'Found calc result for %s %d %s %s %s' %(method,eps,tg,residue,atom) calc_value=calc_ghosts[method][eps][tg][residue][atom] calc_error=0.0 if type(calc_value) is type([]): calc_error=calc_value[1] calc_value=calc_value[0] # if options.no_calcerror: calc_error=0.0 # error,satisfied,abs_sat,tot_restraints,tot_abs,real_error=S.get_error_sub(exp_value,calc_value,exp_error,atom,calc_error=calc_error) big_satisfied[method][tg][eps][residue]=[satisfied,abs_sat,tot_abs] exp_error=errors[atom] if exp_value[0]=='q': exp_value=exp_value[1:] if len(exp_value.split(';'))==2: s=[] for val in exp_value.split(';'): if val[0] in ['<','>']: val=val[1:] s.append(float(val)) exp_error=abs(s[0]-s[1]) exp_value=float(sum(s))/len(s) # # Append the values to xs and ys # if exp_value!='absent': exp_value=float(exp_value) xs.append(exp_value) experrors.append(exp_error) # ys.append(calc_value) calcerrors.append(calc_error) residues.append(residue) # if exp_value*calc_value<0.0: wrong_sign.append(residue) # # Compared all ghosts for this tg at this eps # if len(xs)==0: continue scores.append([self.RMSD(xs,ys,experrors,calcerrors),eps]) bigdata[method][tg][eps]=[xs,ys,experrors,calcerrors,residues] data[eps]=[xs,ys,residues,experrors,calcerrors] # # Looped over all eps # dep[method][tg]=None if len(scores)==0: print 'Skipping %s because I have no points left' %tg continue # # Format for dep # nscore=[] neps=[] for rmsd,eps in scores: nscore.append(rmsd) neps.append(eps/10.0) dep[method][tg]=[nscore[:],neps[:]] scores.sort() best_eps=scores[0][1] print 'Best eps for %12s using %12s is %5.1f, RMSD: %5.2f, numpoints: %3d' %(tg,method,best_eps/10.0,scores[0][0],len(data[best_eps][0])) # # Dig out the points we need # xs,ys,residues,experrors,calcerrors=data[best_eps] bestepses[method][tg]=data[best_eps][:]+[best_eps] import pylab xs1=[] ys1=[] err1=[] calc_err1=[] # xs2=[] ys2=[] err2=[] calc_err2=[] # for count in range(len(xs)): res=residues[count].split(':')[-1] if int(res)>40 and int(res)<=85: xs1.append(xs[count]) ys1.append(ys[count]) err1.append(experrors[count]) calc_err1.append(calcerrors[count]) else: xs2.append(xs[count]) ys2.append(ys[count]) err2.append(experrors[count]) calc_err2.append(calcerrors[count]) # if len(xs1)>0: pylab.errorbar(xs1,ys1,xerr=err1,yerr=calc_err1,fmt='ro',label='beta') if len(xs2)>0: pylab.errorbar(xs2,ys2,xerr=err2,yerr=calc_err2,fmt='bo',label='alpha') # import stats if len(xs)==0: continue #print xs #print ys #corr=stats.correlation(xs,ys) rmsd=self.RMSD(xs,ys,experrors,calcerrors) # pylab.plot(xs,xs,'y-') pylab.plot([min(xs),max(xs)],[0,0],'g-') pylab.xlabel('Experimental dCS') pylab.ylabel('Calculated dCS') pylab.title('%s %s, atom: %s, RMSD: %5.3f, eps_opt: %5.1f' %(method,tg,atom,rmsd,best_eps/10.0)) pylab.legend(loc=8) pylab.savefig('%s/Opteps_%s_%s_%s.png' %(options.plotdir,method,tg,atom)) #pylab.show() pylab.clf() # 
-------------------------- # # Done with everything for this method # Find best overall RMSD # print print 'Finding best overall eps for each method' print alldata={} for method in bigdata.keys(): tgs=sorted(bigdata[method].keys()) tg0=':0035:GLU' rmsds=[] alldata[method]={} for eps in bigdata[method][tg0].keys(): xs=[] ys=[] experrors=[] residues=[] for tg in tgs: if bigdata[method][tg].has_key(eps): xs=xs+bigdata[method][tg][eps][0] ys=ys+bigdata[method][tg][eps][1] experrors=experrors+bigdata[method][tg][eps][2] calcerrors=calcerrors+bigdata[method][tg][eps][3] residues.append([tg,bigdata[method][tg][eps][4]]) rmsds.append([self.RMSD(xs,ys,experrors,calcerrors),eps]) alldata[method][eps]=[xs,ys,experrors,calcerrors,residues] rmsds.sort() #print rmsds #print tgs #print bigdata[method][tg0].keys() besteps=rmsds[0][1] print 'Best eps for %12s is %5.1f with RMSD: %6.3f' %(method,besteps,rmsds[0][0]) import pylab xs,ys,experrors,calcerrors,residues=alldata[method][besteps] return big_satisfied # # ----- # def structure_size_error(self): """Plot the size of the erors on the structure""" # # Let Yasara show the structure and color it according to the size of the error # raise Exception('function not completed') import Yasara Y=Yasara.yasara_handler() obj=Y.load_mol('2LZT.pdb') Y.yasara.run('HideAtom Sidechain') tgnum=int(tg.split(':')[1]) Y.yasara.ColorRes('%d' %tgnum,'magenta') Y.yasara.run('ShowRes %s' %tgnum) for residue in sorted(rescolor.keys()): resnum=int(residue[1:]) if rescolor.has_key(residue): color=rescolor[residue] else: color=1059 Y.yasara.ColorRes('%d' %(resnum),'%d' %color) print sorted(wrong_sign) raw_input('is this ok?') return # # ----- # def restraint_plot(self,big_satisfied): """ Make a plot of restraints satisfied vs. eps for each method """ import pylab plotdata=[] for method in big_satisfied.keys(): xs=[] present=[] absent=[] tgs=big_satisfied[method].keys() for eps in sorted(big_satisfied[method][tgs[0]].keys()): sat=[] abs_sat=[] for tg in tgs: for residue in big_satisfied[method][tg][eps].keys(): data=big_satisfied[method][tg][eps][residue] if data[2]==1: abs_sat.append(data[1]) else: sat.append(data[0]) sat=sum(sat)/float(len(sat))*100.0 abse=sum(abs_sat)/float(len(abs_sat))*100.0 present.append(sat) xs.append(eps/10.0) absent.append(abse) # pylab.plot(xs,present,'-',label=method,linewidth=3) pylab.plot(xs,absent,'--',label='abs %s' %method,linewidth=3) plotdata.append([xs,present,absent,method]) pylab.legend() pylab.ylim((0.0,100.0)) pylab.xlabel('Dielectric constant') pylab.ylabel('% of restraints satisfied') pylab.savefig('%s/restraints.png' %self.options.plotdir,dpi=300) #pylab.show() return plotdata # # -------- # if __name__=='__main__': print print 'Analysis of ghost titrations' print '<NAME>, 2011' print import optparse parser=optparse.OptionParser() # parser.add_option('-x','--experimentalghosts',dest='expfile',default='all_restraints/new_restraints.pickle',type='string', help='Pickle file containing the experimentally determined ghost restraints. Default: %default') # parser.add_option('--heatmap',dest='heatmap',action='store_true',default=False, help='Make heatmap plot of titrations. Default: %default') parser.add_option('-y','--yasarafig',dest='yasarafig',action='store_true',default=False, help='Show the ghosts on the structure. Default: %default') parser.add_option('-t','--titgroup',dest='titgroup',action='store',default=':0035:GLU', help='Titratable group selection. 
Default: %default') parser.add_option('-a','--atom',dest='atom',action='store',default='N', help='Atom to examine. Default: %default') parser.add_option('--pdb',dest='pdbfile',action='store',default='2LZT_H.pdb', help='PDB file. Default: %default') parser.add_option('--useabsent',dest='useabsent',action='store_true',default=False, help='Include absent ghosts in the plots and RMSDs (only enabled for cubescanplot. Default: %default') parser.add_option('--use_questionable',dest='use_questionable',default=True,action='store_true',
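# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not part of the original script): the
# per-nucleus mean/SD computed by calculate_average() above, using the
# standard-library statistics module in place of the external stats helper.
# The values list is hypothetical.
import statistics

values = [0.12, 0.15, 0.11, 0.14]  # hypothetical ghost shifts for one nucleus
avg = statistics.mean(values)
SD = statistics.stdev(values) if len(values) > 1 else 0.0
print('avg: %5.2f (SD %5.2f) over %d values' % (avg, SD, len(values)))
# ---------------------------------------------------------------------------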
        # the barge in a way that is consistent with the
        # DOE database by allocating the barge as a non-oil cargo barge
        # that will pose a fuel-oil spill risk only.
        if not oil_type:
            fuel_spill = True
            oil_type = None
        # *** END ERROR CATCH ***
    elif (
        destination in US_origin_destination
        and destination not in WA_in_noinfo[vessel_type]
    ):
        oil_type = get_oil_type_cargo(
            WA_in_yaml, destination, vessel_type, random_generator
        )
        # *** ERROR CATCH ***
        # Same explanation as given above, in
        # 'elif origin in US_origin_destination'
        if not oil_type:
            fuel_spill = True
            oil_type = None
        # *** END ERROR CATCH ***
    elif destination in CAD_origin_destination:
        if destination == "Westridge Marine Terminal":
            # Westridge doesn't receive crude for storage
            oil_type = "jet"
        else:
            oil_type = get_oil_type_cargo(
                CAD_yaml, destination, vessel_type, random_generator
            )
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Remaining cases are those that were not linked to an oil terminal
    # in our origin-destination analysis for transport to/from
    # known oil-transfer facilities. Tracks were not joined (and will
    # have null values for origin/destination) if adjacent
    # ship tracks are (a) < 1 km long, (b) over 4 hours apart, or (c)
    # would require > 80 knots to join. The tracks that lack details of
    # origin-destination fall into the category of ship tracks that
    # may or may not be oil traffic. As such, I first use the probability
    # of oil cargo for tank barge traffic to
    # weight whether the ship track represents an oil-cargo & fuel spill
    # risk (fuel_spill = False) or a fuel-spill risk only (fuel_spill = True).
    # For the cases in which fuel_spill is False, I use origin
    # as 'US', 'Pacific' or 'Canada' to specify cargo allocation.
    # NOTE: Currently Canada == US
    # ALSO NOTE: Once tracks are identified as potential
    # cargo-spill tracks (fuel_spill = False), they will still be treated
    # like any other tank traffic, with a 0.8/0.2 probability of
    # cargo/fuel spill.
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    elif origin == "Pacific":
        fuel_spill = random_generator.choice(
            [False, True], p=[probability_oilcargo, probability_fuelonly]
        )
        if fuel_spill:
            oil_type = None
        else:
            oil_type = get_oil_type_cargo(
                Pacific_yaml, None, vessel_type, random_generator
            )
    elif origin == "US":
        fuel_spill = random_generator.choice(
            [False, True], p=[probability_oilcargo, probability_fuelonly]
        )
        if fuel_spill:
            oil_type = None
        else:
            oil_type = get_oil_type_cargo(US_yaml, None, vessel_type, random_generator)
    elif origin == "Canada":
        fuel_spill = random_generator.choice(
            [False, True], p=[probability_oilcargo, probability_fuelonly]
        )
        if fuel_spill:
            oil_type = None
        else:
            oil_type = get_oil_type_cargo(US_yaml, None, vessel_type, random_generator)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Remaining cases have null values for origin and destination.
    # I first use the probability of oil cargo for tank barge traffic to
    # weight whether the ship track is an oil-cargo & fuel spill risk
    # (fuel_spill = False) or a fuel-spill risk only (fuel_spill = True).
    # For the cases in which fuel_spill is False, I use the US_generic
    # fuel allocation to attribute fuel type.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ else: fuel_spill = random_generator.choice( [False, True], p=[probability_oilcargo, probability_fuelonly] ) if fuel_spill: oil_type = None else: oil_type = get_oil_type_cargo(US_yaml, None, vessel_type, random_generator) return oil_type, fuel_spill def get_oil_type_tanker( oil_attrs, origin, destination, transport_data_dir, random_generator ): """Randomly choose type of cargo oil spilled from a tanker based on AIS track origin & destination, and oil cargo attribution analysis. Decision tree for allocating oil type to tanker traffic see Google drawing [Tanker_Oil_Attribution](https://docs.google.com/drawings/d/1-4gl2yNNWxqXK-IOr4KNZxO-awBC-bNrjRNrt86fykU/edit) for a visual representation. :param dict oil_attrs: Oil attribution information from the output of make_oil_attrs.py. :param str or None origin: Origin of AIS track from which spill occurs. :param str or None destination: Destination of AIS track from which spill occurs. :param transport_data_dir: Directory path to marine_transport_data files repository cloned from https://github.com/MIDOSS/marine_transport_data. :type transport_data_dir: :py:class:`pathlib.Path` :param random_generator: PCG-64 random number generator :type random_generator: :py:class:`numpy.random.Generator` :return: Type of oil spilled. :rtype: str """ vessel_type = "tanker" # Assign US and CAD origin/destinations from oil_attrs file CAD_origin_destination = oil_attrs["categories"]["CAD_origin_destination"] US_origin_destination = oil_attrs["categories"]["US_origin_destination"] # Get cargo oil type attribution information from oil-type yaml files yaml_file = transport_data_dir / Path(oil_attrs["files"]["CAD_origin"]).name with yaml_file.open("rt") as f: CAD_yaml = yaml.safe_load(f) yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_destination"]).name with yaml_file.open("rt") as f: WA_in_yaml = yaml.safe_load(f) WA_in_noinfo = _calc_no_info_facilities(WA_in_yaml) yaml_file = transport_data_dir / Path(oil_attrs["files"]["WA_origin"]).name with yaml_file.open("rt") as f: WA_out_yaml = yaml.safe_load(f) WA_out_noinfo = _calc_no_info_facilities(WA_out_yaml) # US_origin is for US as origin yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_origin"]).name with yaml_file.open("rt") as f: US_yaml = yaml.safe_load(f) # US_combined represents the combined import and export of oil yaml_file = transport_data_dir / Path(oil_attrs["files"]["US_combined"]).name with yaml_file.open("rt") as f: USall_yaml = yaml.safe_load(f) yaml_file = transport_data_dir / Path(oil_attrs["files"]["Pacific_origin"]).name with yaml_file.open("rt") as f: Pacific_yaml = yaml.safe_load(f) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # NOTE: these pairs need to be used together for # "get_oil_type_cargo" (but don't yet have error-checks in place): # - "WA_in_yaml" and "destination" # - "WA_out_yaml" and "origin" # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ if origin in CAD_origin_destination: if origin == "Westridge Marine Terminal": oil_type = get_oil_type_cargo( CAD_yaml, origin, vessel_type, random_generator ) else: if destination in US_origin_destination: # we have better information on WA fuel transfers, # so I'm prioritizing this information source oil_type = get_oil_type_cargo( WA_in_yaml, destination, vessel_type, random_generator ) else: oil_type = get_oil_type_cargo( CAD_yaml, origin, vessel_type, random_generator ) elif origin in US_origin_destination and origin not in 
WA_out_noinfo[vessel_type]: oil_type = get_oil_type_cargo( WA_out_yaml, origin, vessel_type, random_generator ) elif ( destination in US_origin_destination and destination not in WA_in_noinfo[vessel_type] ): oil_type = get_oil_type_cargo( WA_in_yaml, destination, vessel_type, random_generator ) elif destination in CAD_origin_destination: oil_type = get_oil_type_cargo( CAD_yaml, destination, vessel_type, random_generator ) elif origin == "Pacific": oil_type = get_oil_type_cargo(Pacific_yaml, None, vessel_type, random_generator) elif origin == "US": oil_type = get_oil_type_cargo(US_yaml, None, vessel_type, random_generator) else: # Currently, this is a catch for all ship tracks not allocated with origin or destination # It's a generic oil type attribution from the combined US import and export oil_type = get_oil_type_cargo(USall_yaml, None, vessel_type, random_generator) return oil_type def _calc_no_info_facilities(oil_xfer_info): """Calculate vessel type keyed dict of lists of facilities for which there is no oil transfer data. :param dict oil_xfer_info: Oil transfer information from regional oil attribution YAML file. :return: Dictionary of lists of facilities for which there is no oil transfer data, keyed by vessel type :rtype: dict """ no_info_facilities = collections.defaultdict(list) for facility in oil_xfer_info: for vessel_type in oil_xfer_info[facility]: try: transfers = sum( [ oil_xfer_info[facility][vessel_type][oil_type][ "number_of_transfers" ] for oil_type in oil_xfer_info[facility][vessel_type] ] ) except TypeError: # Handle "names" stanza in YAML continue if transfers == 0: no_info_facilities[vessel_type].append(facility) return no_info_facilities def get_oil_type_cargo(cargo_info, facility, vessel_type, random_generator): """Randomly choose cargo oil type based on facility and vessel type by querying information in input yaml_file. :param dict cargo_info: Cargo oil type attribution information from the output of a make_cargo_*.ipynb notebooks. :param str or None facility: Vessel origin from AIS. :param str vessel_type: Vessel type from which spill occurs. :param random_generator: PCG-64 random number generator :type random_generator: :py:class:`numpy.random.Generator` :return: Cargo oil type. :rtype: str """ try: ship = cargo_info[facility][vessel_type] except KeyError: ship = cargo_info[vessel_type] raw_probs = numpy.array([ship[oil_type]["fraction_of_total"] for oil_type in ship]) if abs(raw_probs.sum() - 1) > 1e-4: msg = ( f"Probable data entry error - sum of raw probabilities is not close to 1: " f"{raw_probs.sum()} for {facility=}, {vessel_type=}" ) logging.warning(msg) raise ValueError(msg) oil_type = random_generator.choice(list(ship.keys()), p=raw_probs / raw_probs.sum()) return oil_type def write_csv_file(df, csv_file): """ :param df: Dataframe to write to CSV file. :type df: :py:class:`pandas.DataFrame` :param str csv_file: File path and name of CSV file to write to. """ df.to_csv(csv_file, index=False, date_format="%Y-%m-%d %H:%M") logging.info(f"wrote CSV file to {csv_file}") @click.command( help=""" Calculate and store a CSV file containing parameters of a set of random oil spills to drive Monte Carlo runs of MOHID. \b Please see \b https://ubc-moad-tools.readthedocs.io/en/latest/moad_tools.html#moad_tools.midoss.random_oil_spills.cli for more information about arguments and options, and \b https://ubc-moad-tools.readthedocs.io/en/latest/moad_tools.html#processing-configuration-yaml-file for details of the contents of the config file. 
""" ) @click.version_option() @click.argument("n_spills", type=int) @click.argument( "config_file", type=click.Path(exists=True, readable=True, file_okay=True, dir_okay=False), ) @click.argument("csv_file", type=click.Path(writable=True)) @click.option( "-v", "--verbosity", default="warning", show_default=True, type=click.Choice(("debug", "info", "warning", "error", "critical")), help=""" Choose how much information you want to see about the progress of the calculation; warning, error, and critical should be silent unless something bad goes wrong. """, ) def cli(n_spills, config_file, csv_file, verbosity): """Command-line interface for :py:func:`moad_tools.midoss.random_oil_spills`. :param int
import collections
import collections.abc
import json
import logging
import requests
import traceback
import re
import os
import sys
import yaml
import unittest

from jinja2 import Template
from collections import defaultdict
from collections import namedtuple
from greent.concept import Concept
from greent.concept import ConceptModel
from greent.identifiers import Identifiers
from greent.node_types import node_types
from greent.service import Service
from greent.util import LoggingUtil
from greent.util import Resource
from greent.util import DataStructure
from greent.util import Text
from pprint import pprint
from greent.graph_components import KNode, KEdge
from greent import node_types
from pyld import jsonld

logger = LoggingUtil.init_logging (__name__, level=logging.DEBUG)

# TODO: bind to biolink et al
node_type_map = {
    "gene"              : "G",
    "drug"              : "S",
    "process"           : "P",
    "cell"              : "C",
    "anatomy"           : "A",
    "phenotype"         : "P",
    "disease"           : "D",
    "genetic_condition" : "X",
    "pathway"           : "W",
}

def get_node_type (out_concept):
    out_concept = out_concept.lower ()
    node_type = node_type_map[out_concept] if out_concept in node_type_map else None
    node_type = node_types.type_codes [node_type]
    return node_type

def update_dict(d, u):
    for k, v in u.items():
        # collections.Mapping was removed in Python 3.10; use collections.abc
        if isinstance(v, collections.abc.Mapping):
            d[k] = update_dict(d.get(k, {}), v)
        else:
            d[k] = v
    return d

def defaultdict_to_regular(d):
    """ Recursively convert a defaultdict to a dict. """
    if isinstance(d, defaultdict):
        d = {k: defaultdict_to_regular(v) for k, v in d.items()}
    return d

# Raw strings avoid invalid-escape-sequence warnings on modern Pythons.
punctuation = re.compile(r'[ ?=\./:{}]+')
trailing_under = re.compile(r'\/$')

class MethodMetadata:
    """ Metadata about a method dynamically discovered from the translator
    registry and attached to a TranslatorRegistry instance. """
    def __init__(self, api, path, in_types, in_concept, out_type, predicate,
                 obj_path, out_concept=None, op=None):
        self.api = api
        self.path = path
        method_path = trailing_under.sub ('', self.path)
        self.in_types = in_types
        self.in_concept = in_concept
        self.out_type = out_type
        self.predicate = predicate
        self.obj_path = obj_path
        self.out_concept = out_concept
        self.op = punctuation.sub ('_', f"{self.api}_{method_path}")

    def __repr__(self):
        return f"in_concept: {self.in_concept} out_concept: {self.out_concept} in_types: {self.in_types} out_type: {self.out_type} path: {self.obj_path} op: {self.op}"

class TranslatorRegistry(Service):
    """ Interact with Translator services. """
    def __init__(self, context):
        """ Read the Translator Registry root document. For each listed API,
        read its metadata. Then consider each path, parameter, and output in
        detail, registering template strings used for invoking each service.
        Create a mapping to facilitate lookups of invocation templates based
        on type pairs. Later, need semantics about what the meaning of the
        transitions is. Revenge of the semantic web and stuff."""
        super(TranslatorRegistry, self).__init__("transreg", context)
        self.verbose = True #False
        self.concept_model = ConceptModel ("biolink-model")
        self.identifiers = Identifiers ()
        # Use cached model
        self.op_map = Resource.get_resource_obj (os.path.join ("conf", "transreg.yml"), format='yaml')
        if not isinstance (self.op_map, dict):
            logger.debug ("Loaded cached copy of translator registry config.")
            self._load_registry_map ()

    def _load_registry_map (self):
        """ Load the entire registry, parsing each specification.
""" self.op_map = defaultdict(lambda:defaultdict(lambda:defaultdict(None))) url = "{0}/API_LIST.yml".format (self.url) registry = yaml.load (requests.get (url).text) apis = {} for context in registry['APIs']: metadata = context['metadata'] api_name = metadata.split (os.sep)[0].replace (" ","") """ For each API, load the specification... """ logger.debug ("API: {}".format (api_name)) api_url = "{0}/{1}".format (self.url, metadata) model = yaml.load (requests.get (api_url).text) #print (json.dumps (model, indent=2)) """ Use local specification fragments as layers over the registry specifications. This allows us to extend the registry while working within the same general data structure. """ layer_path = os.path.join ("registry", "layers", api_name, f"{api_name}.yaml") if os.path.exists (layer_path): with open (layer_path, "r") as stream: layer = yaml.load (stream.read ()) model = update_dict (model, layer) servers = model.get('servers', []) server = None if isinstance(servers,list) and len(servers) > 0: server = servers[0]['url'] """ Process each path or operation in this API. """ paths = model.get('paths', {}) for path in paths: obj = paths[path] #logger.debug ("path: {}".format (path)) get = obj['get'] if 'get' in obj else {} #logger.debug ("get: {}".format (get)) """ Process a parameter of this operation. """ for parameters in get.get('parameters',{}): #logger.debug ("param: {}".format (parameters)) if 'x-valueType' in parameters: values_in = parameters.get('x-requestTemplate',{}) """ Parse response value settings for this operation. """ success_response = get.get('responses',{}).get('200',{}) json_ld_url = success_response.get('x-JSONLDContext',None) json_ld = {} if json_ld_url: if json_ld_url.startswith ("http"): json_ld = requests.get(json_ld_url).json () elif os.path.exists (json_ld_url): """ Override the JSON-LD context supplied by the registry with a local file. """ with open (json_ld_url, "r") as stream: json_ld = json.loads (stream.read ()) """ Get concept types. * Prefer explicitly curated metadata specifying a concept name. * Guess the appropriate concept based on heuristic mappings * TODO: Accept namespaced concpets from interoperable models (?) """ all_input_types = [ v['valueType'] for v in values_in ] in_concept = self.get_concept (parameters, all_input_types) all_out_types = [ response_value['valueType'] for response_value in success_response.get('x-responseValueType',{}) ] out_concept = self.get_concept (success_response, all_out_types) for v in values_in: in_type = v['valueType'] x_template = v['template'] #logger.debug ("in_type: {}".format (in_type)) """ Create a record for each response scenario. """ """ TODO: We need to flexibly handle multiple response scenarios. Store multiple per operation and manage dynamically. 
""" for response_value in success_response.get('x-responseValueType',{}): out_type = response_value['valueType'] #logger.debug ("out_type: {}".format (out_type)) #logger.debug (" --api> {0} in: {1} out: {2}".format (api_name, in_type, out_type)) self.op_map[api_name][in_type][out_type] = { "op" : path, "get_url" : "{0}{1}".format (server, x_template), "in_concept" : in_concept, "in_types" : all_input_types, "out_type" : out_type, "obj_path" : response_value.get('path', None), "out_concept" : out_concept, "jsonld" : json_ld } #print (json.dumps (self.op_map[api_name][in_type][out_type], indent=2)) # Cache model registry_map = os.path.join (os.path.dirname (__file__), "conf", "transreg.yml") vanilla_op_map = defaultdict_to_regular (self.op_map) with open (registry_map, "w") as stream: logger.debug ("Cache copy of registry map") yaml.dump (vanilla_op_map, stream, default_flow_style=False) return self.op_map def get_concept (self, context, identifiers): curies = list(filter (lambda v : v != None, [ self.identifiers.id2curie (v) for v in identifiers ])) concept = context.get ("x-concept", self.get_concept_name (curies)) return concept def get_concept_name (self, curies): concept = self.concept_model.get_single_concept_by_prefixes (curies) return concept.name if concept else None def get_service_metadata (self, api_name, in_type, out_type): metadata = self.op_map.get(api_name,{}).get (in_type,{}).get (out_type,{}) return DataStructure.to_named_tuple ('ServiceMetadata', metadata) if len(metadata) > 0 else None def new_edge (self, source, function, properties, source_node=None, target_node=None): raise RuntimeError('The following KEdge constructor looks very suspect.') edge = KEdge (source, function, properties) edge.source_node = source_node edge.target_node = target_node return edge def get (self, api_name, node, method_metadata): """ Invoke a GET requests on the specified API for value node with the given metadata. """ result = [] try: """ Find synonym in the input node of an appropriate input type for this operation. """ input_arg = None input_type = None for synonym in node.synonyms: #print (f"synonym -> {synonym}") syn = self.identifiers.curie_instance2id (synonym) print (f"syn -> {syn}") print (f"syn -> {method_metadata.in_types}") for t in method_metadata.in_types: if input_arg: break if t in syn: input_arg = synonym.split (':')[1] input_type = t break """ Fail if no supplied synonym is of an appropriate type to make the call. """ if not input_arg: raise ValueError (f"Node {node} contains no synonyms of type {method_metadata.in_types} required by operation {method_metadata.op}") """ Get the service metadata """ service_metadata = self.get_service_metadata (api_name, input_type, method_metadata.out_type) logger.debug ("* Executing translator registry method: {0} in: {1} out: {2} template: {3} value: {4} ".format ( api_name, input_type, method_metadata.out_type, service_metadata.get_url, node)) """ Parameterize and execute the HTTP request. """ url = Template (service_metadata.get_url).render (input=input_arg) response = requests.get (url).json () #with open ("a.txt", "w") as stream: # stream.write (json.dumps (response, indent=2)) """ Expand the context with JSON-LD """ jsonld_context = json.loads (json.dumps (service_metadata.jsonld), parse_float=lambda v : str (v)) del jsonld_context['@context']['@version'] expanded = jsonld.expand ( response, { "expandContext" : jsonld_context['@context'] }) """ Extract data from the returned JSON object. """ """ TODO: Responses are complex. 
Figure out how to generalize * Traversal of the response * Decisions about how to create nodes and edges * What to say about the semantic types of returned identifiers """ print (json.dumps (expanded, indent=2)) for obj in expanded: for predicate, v in obj.items (): if isinstance (v, list): for item in v: val = item["@id"] if "@id" in item else None if val: curie = self.identifiers.instance2curie (val) #print (f"val: {val} curie: {curie}") out_concept = method_metadata.out_concept node_type = get_node_type (out_concept) if curie and node_type: #print (f" ------> node type {node_type} id {val} ") new_node = KNode(curie, type=node_type) result.append ( ( self.new_edge(source=self.name, function=predicate, properties=response, source_node = node, target_node = new_node), new_node ) ) except Exception as e: traceback.print_exc () exc_type, exc_value, exc_tb = sys.exc_info() exception_text = traceback.format_exception (exc_type, exc_value, exc_tb) logger.error (exception_text) return result def add_method (self, cls, api, method_metadata): try: getattr
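# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not original code): the URL
# parameterisation step used in TranslatorRegistry.get(), assuming a
# registry-style template with an {{ input }} placeholder (hypothetical URL).
from jinja2 import Template

get_url = "https://example.org/query?id={{ input }}"
url = Template(get_url).render(input="DOID:1234")
assert url == "https://example.org/query?id=DOID:1234"
# ---------------------------------------------------------------------------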
= Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) if (matched and not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//', matched.group(1))): delimiter = ')' + matched.group(2) + '"' end = matched.group(3).find(delimiter) if end >= 0: # Raw string ended on same line line = (matched.group(1) + '""' + matched.group(3)[end + len(delimiter):]) delimiter = None else: # Start of a multi-line raw string line = matched.group(1) + '""' else: break lines_without_raw_strings.append(line) # TODO(unknown): if delimiter is not None here, we might want to # emit a warning for unterminated string. return lines_without_raw_strings def FindNextMultiLineCommentStart(lines, lineix): """Find the beginning marker for a multiline comment.""" while lineix < len(lines): if lines[lineix].strip().startswith('/*'): # Only return this marker if the comment goes beyond this line if lines[lineix].strip().find('*/', 2) < 0: return lineix lineix += 1 return len(lines) def FindNextMultiLineCommentEnd(lines, lineix): """We are inside a comment, find the end marker.""" while lineix < len(lines): if lines[lineix].strip().endswith('*/'): return lineix lineix += 1 return len(lines) def RemoveMultiLineCommentsFromRange(lines, begin, end): """Clears a range of lines for multi-line comments.""" # Having // dummy comments makes the lines non-empty, so we will not get # unnecessary blank line warnings later in the code. for i in range(begin, end): lines[i] = '/**/' def RemoveMultiLineComments(filename, lines, error): """Removes multiline (c-style) comments from lines.""" lineix = 0 while lineix < len(lines): lineix_begin = FindNextMultiLineCommentStart(lines, lineix) if lineix_begin >= len(lines): return lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) if lineix_end >= len(lines): error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, 'Could not find end of multi-line comment') return RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) lineix = lineix_end + 1 def CleanseComments(line): """Removes //-comments and single-line C-style /* */ comments. Args: line: A line of C++ source. Returns: The line with single-line comments removed. """ commentpos = line.find('//') if commentpos != -1 and not IsCppString(line[:commentpos]): line = line[:commentpos].rstrip() # get rid of /* ... */ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) class CleansedLines(object): """Holds 4 copies of all lines with different preprocessing applied to them. 1) elided member contains lines without strings and comments. 2) lines member contains lines without comments. 3) raw_lines member contains all the lines without processing. 4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw strings removed. All these members are of <type 'list'>, and of the same length. """ def __init__(self, lines): self.elided = [] self.lines = [] self.raw_lines = lines self.num_lines = len(lines) self.lines_without_raw_strings = CleanseRawStrings(lines) for linenum in range(len(self.lines_without_raw_strings)): self.lines.append(CleanseComments( self.lines_without_raw_strings[linenum])) elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) self.elided.append(CleanseComments(elided)) def NumLines(self): """Returns the number of lines represented.""" return self.num_lines @staticmethod def _CollapseStrings(elided): """Collapses strings and chars on a line to simple "" or '' blocks. 
We nix strings first so we're not fooled by text like '"http://"' Args: elided: The line being processed. Returns: The line with collapsed strings. """ if _RE_PATTERN_INCLUDE.match(elided): return elided # Remove escaped characters first to make quote/single quote collapsing # basic. Things that look like escaped characters shouldn't occur # outside of strings and chars. elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) # Replace quoted strings and digit separators. Both single quotes # and double quotes are processed in the same loop, otherwise # nested quotes wouldn't work. collapsed = '' while True: # Find the first quote character match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) if not match: collapsed += elided break head, quote, tail = match.groups() if quote == '"': # Collapse double quoted strings second_quote = tail.find('"') if second_quote >= 0: collapsed += head + '""' elided = tail[second_quote + 1:] else: # Unmatched double quote, don't bother processing the rest # of the line since this is probably a multiline string. collapsed += elided break else: # Found single quote, check nearby text to eliminate digit separators. # # There is no special handling for floating point here, because # the integer/fractional/exponent parts would all be parsed # correctly as long as there are digits on both sides of the # separator. So we are fine as long as we don't see something # like "0.'3" (gcc 4.9.0 will not allow this literal). if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) collapsed += head + match_literal.group(1).replace("'", '') elided = match_literal.group(2) else: second_quote = tail.find('\'') if second_quote >= 0: collapsed += head + "''" elided = tail[second_quote + 1:] else: # Unmatched single quote collapsed += elided break return collapsed def FindEndOfExpressionInLine(line, startpos, stack): """Find the position just after the end of current parenthesized expression. Args: line: a CleansedLines line. startpos: start searching at this position. stack: nesting stack at startpos. Returns: On finding matching end: (index just after matching end, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at end of this line) """ for i in range(startpos, len(line)): char = line[i] if char in '([{': # Found start of parenthesized expression, push to expression stack stack.append(char) elif char == '<': # Found potential start of template argument list if i > 0 and line[i - 1] == '<': # Left shift operator if stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) elif i > 0 and Search(r'\boperator\s*$', line[0:i]): # operator<, don't add to stack continue else: # Tentative start of template argument list stack.append('<') elif char in ')]}': # Found end of parenthesized expression. # # If we are currently expecting a matching '>', the pending '<' # must have been an operator. Remove them from expression stack. while stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) if ((stack[-1] == '(' and char == ')') or (stack[-1] == '[' and char == ']') or (stack[-1] == '{' and char == '}')): stack.pop() if not stack: return (i + 1, None) else: # Mismatched parentheses return (-1, None) elif char == '>': # Found potential end of template argument list. # Ignore "->" and operator functions if (i > 0 and (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))): continue # Pop the stack if there is a matching '<'. 
Otherwise, ignore # this '>' since it must be an operator. if stack: if stack[-1] == '<': stack.pop() if not stack: return (i + 1, None) elif char == ';': # Found something that look like end of statements. If we are currently # expecting a '>', the matching '<' must have been an operator, since # template argument list should not contain statements. while stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) # Did not find end of expression or unbalanced parentheses on this line return (-1, stack) def CloseExpression(clean_lines, linenum, pos): """If input points to ( or { or [ or <, finds the position that closes it. If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the linenum/pos that correspond to the closing of the expression. TODO(unknown): cpplint spends a fair bit of time matching parentheses. Ideally we would want to index all opening and closing parentheses once and have CloseExpression be just a simple lookup, but due to preprocessor tricks, this is not so easy. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *past* the closing brace, or (line, len(lines), -1) if we never find a close. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]): return (line, clean_lines.NumLines(), -1) # Check first line (end_pos, stack) = FindEndOfExpressionInLine(line, pos, []) if end_pos > -1: return (line, linenum, end_pos) # Continue scanning forward while stack and linenum < clean_lines.NumLines() - 1: linenum += 1 line = clean_lines.elided[linenum] (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack) if end_pos > -1: return (line, linenum, end_pos) # Did not find end of expression before end of file, give up return (line, clean_lines.NumLines(), -1) def
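# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not original cpplint code): what the
# CleansedLines preprocessing above produces, assuming the module-level
# regexes it relies on are defined earlier in this file, as in cpplint.
lines = ['int x = 0;  // trailing comment', 'const char* s = "text";']
clean = CleansedLines(lines)
# clean.raw_lines keeps the input verbatim, clean.lines strips comments, and
# clean.elided also collapses string literals to "" so later checks are not
# fooled by code-like text inside string constants.
# ---------------------------------------------------------------------------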
, u'皔' : [u'h'] , u'辚' : [u'l'] , u'缥' : [u'p'] , u'庤' : [u'z'] , u'蠫' : [u'l'] , u'鞪' : [u'm'] , u'涮' : [u's'] , u'䜵' : [u'c', u's'] , u'逻' : [u'l'] , u'疾' : [u'j'] , u'軄' : [u'z'] , u'繏' : [u'x'] , u'巎' : [u'n'] , u'雔' : [u'c'] , u'泘' : [u'h'] , u'䙟' : [u'd', u'w'] , u'㯢' : [u'z'] , u'齥' : [u'x'] , u'瓨' : [u'h', u'j'] , u'跮' : [u'c', u'd'] , u'絹' : [u'x', u'j'] , u'峸' : [u'c'] , u'闾' : [u'l'] , u'馋' : [u'c'] , u'昊' : [u'h'] , u'媍' : [u'f'] , u'㬌' : [u'j'] , u'业' : [u'y'] , u'枟' : [u'y'] , u'㢡' : [u'j'] , u'锨' : [u'x'] , u'嘪' : [u'm'] , u'読' : [u'd'] , u'欬' : [u'k'] , u'侯' : [u'h'] , u'銽' : [u'g'] , u'猼' : [u'p', u'b'] , u'垿' : [u'x'] , u'棁' : [u'z', u't'] , u'虊' : [u'l'] , u'孌' : [u'l'] , u'烑' : [u'y'] , u'蟟' : [u'l'] , u'摞' : [u'l'] , u'壡' : [u'r'] , u'㥠' : [u'x'] , u'譬' : [u'p'] , u'䱮' : [u'e'] , u'旳' : [u'd'] , u'鍼' : [u'q', u'z'] , u'呾' : [u'd'] , u'榀' : [u'p'] , u'踅' : [u'x', u'c'] , u'䌇' : [u'h', u's'] , u'熐' : [u'm'] , u'阕' : [u'q'] , u'氙' : [u'x'] , u'蒞' : [u'l'] , u'妠' : [u'n'] , u'琩' : [u'c'] , u'抲' : [u'h'] , u'鬷' : [u'z'] , u'尹' : [u'y'] , u'觀' : [u'g'] , u'䫂' : [u'd'] , u'祋' : [u'd'] , u'㩍' : [u'c'] , u'釐' : [u'x', u'l'] , u'勒' : [u'l'] , u'豙' : [u'y'] , u'翤' : [u'c'] , u'鑩' : [u'e'] , u'橭' : [u'g'] , u'苲' : [u'z'] , u'䟴' : [u'z'] , u'牽' : [u'q'] , u'㝿' : [u'q', u'b', u'f'] , u'䮁' : [u'p'] , u'肇' : [u'z'] , u'缎' : [u'd'] , u'抛' : [u'p'] , u'䜞' : [u'j'] , u'鰤' : [u's'] , u'莱' : [u'l'] , u'縸' : [u'm'] , u'劻' : [u'k'] , u'淅' : [u'x'] , u'䙈' : [u'h'] , u'㳏' : [u'c', u's'] , u'齎' : [u'q', u'j'] , u'痕' : [u'h', u'g'] , u'苛' : [u'h', u'k'] , u'祢' : [u'm', u'n'] , u'工' : [u'g'] , u'泯' : [u'm'] , u'䅲' : [u'q'] , u'㿹' : [u't'] , u'鹸' : [u'j'] , u'瓿' : [u'p', u'b'] , u'七' : [u'q'] , u'蜉' : [u'f'] , u'碌' : [u'l'] , u'嘓' : [u'g'] , u'愝' : [u'y'] , u'䂜' : [u'p', u'b'] , u'馢' : [u'j'] , u'䤭' : [u'y', u'k', u's'] , u'蘳' : [u'h'] , u'箶' : [u'h'] , u'儽' : [u'l'] , u'恇' : [u'k'] , u'䏆' : [u'q', u'c'] , u'飌' : [u'f'] , u'䡗' : [u'g'] , u'腝' : [u'r', u'e', u'n'] , u'章' : [u'z'] , u'偧' : [u'z'] , u'捱' : [u'a'] , u'䋰' : [u'j'] , u'鯶' : [u'h'] , u'较' : [u'j'] , u'栂' : [u'm'] , u'鞓' : [u't'] , u'瀒' : [u's'] , u'咕' : [u'g'] , u'榗' : [u'j'] , u'鬠' : [u'k'] , u'堢' : [u'b'] , u'熧' : [u'z'] , u'㚩' : [u'r'] , u'蒵' : [u'x'] , u'攴' : [u'p'] , u'妷' : [u'z'] , u'㨶' : [u'c', u'z', u'd'] , u'衂' : [u'n'] , u'䵄' : [u'p', u'b', u'f'] , u'曉' : [u'x'] , u'㯋' : [u'y'] , u'遒' : [u'q'] , u'啔' : [u'q'] , u'觗' : [u'z'] , u'橖' : [u't'] , u'仙' : [u'x'] , u'釧' : [u'c'] , u'牦' : [u'm', u'l'] , u'囩' : [u'y'] , u'毫' : [u'h'] , u'蕴' : [u'y'] , u'婶' : [u's'] , u'珻' : [u'm'] , u'枈' : [u'b'] , u'頍' : [u'k'] , u'崏' : [u'm'] , u'誖' : [u'b'] , u'侘' : [u'c'] , u'稡' : [u'z'] , u'㼣' : [u'b'] , u'銦' : [u'y'] , u'垨' : [u's'] , u'贯' : [u'w', u'g'] , u'粺' : [u'b'] , u'锿' : [u'a'] , u'潃' : [u'x'] , u'蟈' : [u'y', u'g'] , u'䓊' : [u'y', u'e', u'w'] , u'睓' : [u't'] , u'懜' : [u'm'] , u'驡' : [u'p'] , u'彣' : [u'w'] , u'摵' : [u's'] , u'㥷' : [u'y'] , u'鳺' : [u'f'] , u'凼' : [u'd'] , u'攆' : [u'n'] , u'䆉' : [u'b'] , u'骏' : [u'j'] , u'䴖' : [u'j'] , u'蘜' : [u'j'] , u'碣' : [u'y', u'k', u'j'] , u'唦' : [u's'] , u'搰' : [u'h', u'g'] , u'䂳' : [u'c'] , u'馹' : [u'r'] , u'蕆' : [u'c'] , u'翍' : [u'p'] , u'呐' : [u'n'] , u'䟝' : [u't'] , u'飣' : [u'd'] , u'獪' : [u'h', u'k'] , u'葰' : [u's', u'j'] , u'绷' : [u'b'] , u'孺' : [u'r'] , u'贁' : [u'b'] , u'檄' : [u'x'] , u'尋' : [u'x'] , u'㦎' : [u'h'] , u'锑' : [u't'] , u'犔' : [u'x'] , u'讚' : [u'z'] , u'笥' : [u's'] , u'媤' : [u's'] , u'谫' : [u'j'] , u'鎪' : [u's'] , u'榮' : [u'r'] , u'䌵' : [u'z'] , u'㢸' : [u'b'] , u'鐻' : [u'q', u'j'] , 
u'熾' : [u'c'] , u'諄' : [u'z'] , u'穏' : [u'w'] , u'姎' : [u'y'] , u'鋔' : [u'w'] , u'棘' : [u'j'] , u'䉟' : [u'h'] , u'㿢' : [u'y', u'z'] , u'魥' : [u'e'] , u'烨' : [u'y'] , u'觮' : [u'l'] , u'祹' : [u't'] , u'壸' : [u'k'] , u'釾' : [u'y'] , u'鶋' : [u'j'] , u'戊' : [u'w'] , u'庍' : [u'b'] , u'㼌' : [u'y'] , u'褘' : [u'h'] , u'掟' : [u'z'] , u'㲡' : [u'n'] , u'鄨' : [u'b'] , u'刪' : [u's'] , u'躭' : [u'd'] , u'漬' : [u'q', u's', u'z'] , u'䮯' : [u'a'] , u'隽' : [u'j'] , u'眼' : [u'y', u'w'] , u'县' : [u'x'] , u'況' : [u'k'] , u'艊' : [u'b'] , u'彌' : [u'm'] , u'瓑' : [u'l'] , u'菟' : [u't'] , u'恞' : [u'y'] , u'峡' : [u'x'] , u'转' : [u'z'] , u'靼' : [u'd'] , u'偾' : [u'f'] , u'涀' : [u'x'] , u'訅' : [u'q'] , u'䜇' : [u'g'] , u'疐' : [u'z', u'd'] , u'㚒' : [u's'] , u'鈕' : [u'c', u'n'] , u'栙' : [u'x'] , u'肞' : [u'c'] , u'嶠' : [u'q', u'j'] , u'逸' : [u'y'] , u'瀩' : [u'd'] , u'暲' : [u'z'] , u'鼷' : [u'x'] , u'堹' : [u'z'] , u'跀' : [u'y'] , u'仂' : [u'l'] , u'㹍' : [u'd'] , u'闐' : [u't'] , u'囒' : [u'l'] , u'衙' : [u'y'] , u'䕛' : [u'j'] , u'篤' : [u'd'] , u'適' : [u's', u'z', u'd', u't'] , u'蛲' : [u'r'] , u'䏴' : [u's'] , u'白' : [u'b'] , u'侁' : [u's'] , u'蒇' : [u'c'] , u'笎' : [u'y'] , u'垑' : [u'c'] , u'暛' : [u's'] , u'䌞' : [u'l'] , u'㦥' : [u'h', u'x'] , u'頤' : [u'y'] , u'享' : [u'x'] , u'螱' : [u'w'] , u'稸' : [u'x'] , u'嚻' : [u'x'] , u'槅' : [u'h', u'g'] , u'䉈' : [u's'] , u'㣏' : [u'j'] , u'魎' : [u'l'] , u'燕' : [u'y'] , u'蛛' : [u'z'] , u'絢' : [u'x'] , u'姥' : [u'm', u'l'] , u'棯' : [u'r', u'n'] , u'䕲' : [u'l'] , u'㯹' : [u'b'] , u'驸' : [u'f'] , u'烿' : [u'r'] , u'茉' : [u'm'] , u'刓' : [u'w'] , u'攝' : [u's', u'n'] , u'䒜' : [u'n'] , u'鶢' : [u'y'] , u'䴭' : [u'c'] , u'舳' : [u'z'] , u'羶' : [u's'] , u'唽' : [u'x'] , u'摇' : [u'y'] , u'䟆' : [u'b'] , u'鳌' : [u'a'] , u'㝑' : [u'p', u'm'] , u'蕝' : [u'j', u'z'] , u'绠' : [u'g'] , u'呧' : [u'd'] , u'東' : [u'd'] , u'䛰' : [u'h'] , u'㙻' : [u'q', u'c', u'z'] , u'讃' : [u'z'] , u'氂' : [u'm', u'l'] , u'䢅' : [u'c'] , u'鎓' : [u'w'] , u'傕' : [u'j'] , u'涗' : [u's'] , u'鼠' : [u's'] , u'尢' : [u'y', u'w'] , u'疧' : [u'z'] , u'肵' : [u'q', u'j'] , u'愴' : [u'c'] , u'嶷' : [u'y', u'n'] , u'豂' : [u'l'] , u'䥄' : [u'c', u'z'] , u'拉' : [u'l'] , u'㿋' : [u's'] , u'鑒' : [u'j'] , u'兔' : [u't'] , u'跗' : [u'f'] , u'湖' : [u'h'] , u'䫙' : [u's'] , u'勩' : [u'y'] , u'濫' : [u'j', u'l'] , u'腴' : [u'y'] , u'并' : [u'b'] , u'矻' : [u'k', u'w'] , u'㓽' : [u'c'] , u'授' : [u's'] , u'鰍' : [u'q'] , u'夏' : [u'x', u'j'] , u'躖' : [u'd'] , u'䮘' : [u'p', u'e', u'n'] , u'縡' : [u'z'] , u'㬣' : [u'd'] , u'隦' : [u'p'] , u'厨' : [u'c'] , u'褯' : [u'j'] , u'䘱' : [u'y'] , u'確' : [u'q'] , u'㖼' : [u'm', u'd', u'l'] , u'鄿' : [u'q'] , u'歃' : [u'x', u's'] , u'菈' : [u'l'] , u'䃊' : [u'j'] , u'獓' : [u'a'] , u'旜' : [u'z'] , u'鹡' : [u'j'] , u'季' : [u'j'] , u'恵' : [u'h'] , u'㵷' : [u'b'] , u'飺' : [u'c'] , u'嗼' : [u'm'] , u'礆' : [u'x', u'j'] , u'嶉' : [u'z'] , u'蚏' : [u'y'] , u'儖' : [u'l'] , u'㮝' : [u'h', u'l'] , u'騜' : [u'h'] , u'撣' : [u's', u'c', u'd'] , u'䤦' : [u'y'] , u'砰' : [u'p'] , u'岳' : [u'y'] , u'薹' : [u't'] , u'偀' : [u'y'] , u'饆' : [u'b'] , u'揍' : [u'c', u'z'] , u'䡐' : [u't'] , u'睚' : [u'y'] , u'寝' : [u'q'] , u'蓣' : [u'y'] , u'潪' : [u'd'] , u'㧱' : [u'c', u't', u'n'] , u'顰' : [u'p'] , u'拷' : [u'k'] , u'䝺' : [u'g'] , u'鄁' : [u'b'] , u'的' : [u'd'] , u'䀋' : [u'y'] , u'褑' : [u'y'] , u'溔' : [u'y'] , u'鞚' : [u'k'] , u'逫' : [u'j'] , u'疮' : [u'c'] , u'張' : [u'z'] , u'蠻'
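# ---------------------------------------------------------------------------
# Illustrative sketch (an addition, not original code): looking up candidate
# pinyin initials in the character table above, assuming the (truncated)
# dictionary is bound to a name such as `pinyin_initials` (hypothetical).
word = u'昊天'
initials = [pinyin_initials.get(ch, []) for ch in word]
# Each entry is the list of candidate initial letters for that character,
# e.g. u'昊' maps to [u'h']; polyphonic characters carry several candidates.
# ---------------------------------------------------------------------------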
it is already initialised, this does nothing. if not self._lookup_dict: self._lookup_dict = { "is_descendant_of": {}, "mutually_exclusive_of": {} } def _store_calculation(self, name, key, value): # Put a calculation into a named lookup dictionary. # This method will always store calculation data in the root expansion. # :param name: str # :param key: object used to store the calculation result (e.g. a tuple) # :param value: calculation result | Expansion._NO_CALCULATION # Get the root expansion. root = self.root_expansion # Initialise the lookup dictionary as required. root._init_lookup() # Always use IDs for the key instead. This way calculations are stored for # exact expansions, rather than for any other comparable expansions. if isinstance(key, (tuple, list)): # Map each object x in 'key' to id(x) id_key = tuple(map(lambda x: id(x), key)) else: id_key = id(key) if value is self._NO_CALCULATION: # Drop the stored value, if there is one. root._lookup_dict[name].pop(id_key, None) else: # Otherwise store 'value' under the 'name' dictionary using 'id_key'. root._lookup_dict[name][id_key] = value def _lookup_calculation(self, name, key): # Check if a calculation has already been made and return it. If no # calculation has been made, Expansion._NO_CALCULATION will be returned. # This method will always check for calculations using the root expansion. # :param name: str # :param key: object used to store the calculation result (e.g. a tuple) # :returns: calculation result | Expansion._NO_CALCULATION # Get the root expansion. root = self.root_expansion # Initialise the lookup dictionary as required. root._init_lookup() # Always use IDs for the key instead. This way calculations are stored for # exact expansions, rather than for any other comparable expansions. if isinstance(key, (tuple, list)): # Map each object x in 'key' to id(x) id_key = tuple(map(lambda x: id(x), key)) else: id_key = id(key) # Return the value from the relevant dictionary or _NO_CALCULATION if it # hasn't been calculated yet. return root._lookup_dict[name].get(id_key, self._NO_CALCULATION) def invalidate_calculations(self): """ Invalidate calculations stored in the lookup tables that involve this expansion. This only effects ``mutually_exclusive_of`` and ``is_descendant_of``, neither of which are used in compiling or matching rules. This should be called if a child is added to an expansion or if an expansion's parent is changed outside of what ``JointTreeContext`` does. 
Some changes may also require invalidating descendants, the ``map_expansion`` function can be used with this method to accomplish that:: map_expansion(self, Expansion.invalidate_calculations) """ root = self.root_expansion if not root._lookup_dict: return # nothing to invalidate for d in root._lookup_dict.values(): for k, v in d.items(): # Assume k is either an expansion or an iterable of expansions if self is k or isinstance(k, (tuple, list)) and self in k: assert isinstance(d, dict) d.pop(k) # Do something similar for values elif self is v or isinstance(v, (tuple, list)) and self in k: d.pop(k) def __str__(self): descendants = ", ".join(["%s" % c for c in self.children]) if self.tag: return "%s(%s) with tag '%s'" % (self.__class__.__name__, descendants, self.tag) else: return "%s(%s)" % (self.__class__.__name__, descendants) def __repr__(self): return self.__str__() def __eq__(self, other): return type(self) == type(other) and self.children == other.children def __ne__(self, other): return not self.__eq__(other) def __contains__(self, item): return item in flat_map_expansion(self) @property def is_optional(self): """ Whether or not this expansion has an optional ancestor. :returns: bool """ result = False if self.parent: result = self.parent.is_optional return result @property def is_alternative(self): """ Whether or not this expansion has an AlternativeSet ancestor with more than one child. :returns: bool """ result = False if self.parent: result = self.parent.is_alternative return result @property def repetition_ancestor(self): """ This expansion's closest Repeat or KleeneStar ancestor, if it has one. :returns: Expansion """ parent = self.parent result = None while parent: if isinstance(parent, Repeat): result = parent break parent = parent.parent return result def collect_leaves(self, order=TraversalOrder.PreOrder, shallow=False): """ Collect all descendants of an expansion that have no children. This can include self if it has no children. RuleRefs are also counted as leaves. :param order: tree traversal order (default 0: pre-order) :param shallow: whether to not collect leaves from trees of referenced rules :returns: list """ return filter_expansion( self, lambda x: not x.children, order=order, shallow=shallow ) leaves = property(collect_leaves) @property def leaves_after(self): """ Generator function for leaves after this one (if any). :returns: generator """ self_reached = False leaves = self.root_expansion.leaves for leaf in leaves: if leaf is self: self_reached = True continue elif self_reached: yield leaf @property def matchable_leaves_after(self): """ Generator function yielding all leaves after self that are not mutually exclusive of it. :returns: generator """ for leaf in self.leaves_after: if not self.mutually_exclusive_of(leaf): yield leaf @property def root_expansion(self): """ Traverse to the root expansion r and return it. :returns: Expansion """ r = self while r.parent: r = r.parent return r def is_descendant_of(self, other): """ Whether this expansion is a descendant of another expansion. :param other: Expansion :returns: bool """ if self is other: return False calc_name = "is_descendant_of" calc = self._lookup_calculation(calc_name, (self, other)) if calc is not self._NO_CALCULATION: return calc # Return whether self is in other's expansion tree. 
result = bool(find_expansion(other, lambda x: x is self)) self._store_calculation(calc_name, (self, other), result) return result def mutually_exclusive_of(self, other): """ Whether this expansion cannot be spoken with another expansion. :param other: Expansion :returns: bool """ root = self.root_expansion # Trees are not joined, so we cannot guarantee mutual exclusivity. if root is not other.root_expansion: return False calc_name = "mutually_exclusive_of" # Check if this has been calculated before. Check (other, self) too; mutual # exclusivity is commutative. calc = self._lookup_calculation(calc_name, (self, other)) if calc is self._NO_CALCULATION: calc = self._lookup_calculation(calc_name, (other, self)) if calc is not self._NO_CALCULATION: return calc def add_leaf(x): if not x.children: self._store_calculation(calc_name, (x, self), True) self._store_calculation(calc_name, (x, other), True) def valid_alt_set(x): if isinstance(x, AlternativeSet) and len(x.children) > 1: e1, e2 = None, None for child in x.children: # If they haven't been found, check if child is self or self's # ancestor, or if child is other or other's ancestor. if not e1 and (self.is_descendant_of(child) or self is child): e1 = child if not e2 and (other.is_descendant_of(child) or other is child): e2 = child if e1 and e2: break # This is the expansion we're looking for if self and other descend # from it and if they are not both [descended from] the same child # of x. valid = e1 and e2 and e1 is not e2 if valid: # Add siblings / their leaf expansions in the expansion tree as # mutually exclusive to self and other for child in filter(lambda c: c is not e1 and c is not e2, x.children): map_expansion(child, add_leaf, shallow=True) return valid # Calculate mutually exclusivity, cache the calculation in root._lookup_dict # and return the result. result = bool(find_expansion(root, valid_alt_set)) root._store_calculation(calc_name, (self, other), result) return result class BaseExpansionRef(BaseRef, Expansion): """ Base class which RuleRef, NamedRuleRef, NullRef and VoidRef inherit from. """ def __init__(self, name): # Call both super constructors BaseRef.__init__(self, name) Expansion.__init__(self, []) @staticmethod def valid(name): return optionally_qualified_name.matches(name) def compile(self, ignore_tags=False): self.validate_compilable() if self.tag and not ignore_tags: return "<%s>%s" % (self.name, self.compiled_tag) else: return "<%s>" % self.name def __str__(self): return "%s('%s')" % (self.__class__.__name__, self.name) def __hash__(self): return hash("%s" % self) def __eq__(self, other): return Expansion.__eq__(self, other) and BaseRef.__eq__(self, other) def __copy__(self): e = type(self)(self.name) e.tag = self.tag return e def __deepcopy__(self, memo): return self.__copy__() class NamedRuleRef(BaseExpansionRef): """ Class used to reference rules by name. """ @property def referenced_rule(self): """ Find and return the rule this expansion references in the grammar. This raises an error if the referenced rule cannot be found using ``self.rule.grammar`` or if there is no link to a grammar. :raises: GrammarError :returns: Rule """ if self.rule and self.rule.grammar: return self.rule.grammar.get_rule_from_name(self.name) else: raise GrammarError("cannot get referenced Rule object from Grammar") def _make_matcher_element(self): # Wrap the parser element for the referenced rule's root expansion so that # the current match value for the NamedRuleRef is also set. 
return self._set_matcher_element_attributes(pyparsing.And([ self.referenced_rule.expansion.matcher_element ])) def __hash__(self): return super(NamedRuleRef, self).__hash__()
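The is_descendant_of method above (and mutually_exclusive_of before it) memoizes results in a lookup dictionary held by the root expansion, keyed by calculation name and argument pair. Below is a minimal, self-contained sketch of that caching pattern; Node and its parent-walking check are illustrative stand-ins, since the real class searches the other expansion's subtree with find_expansion rather than walking parents.

_NO_CALCULATION = object()  # sentinel distinguishing "not cached" from a cached False/None

class Node(object):
    def __init__(self, children=None):
        self.parent = None
        self.children = children or []
        for child in self.children:
            child.parent = self
        self._lookup_dict = {}  # only the root node's dict is ever consulted

    @property
    def root(self):
        r = self
        while r.parent:
            r = r.parent
        return r

    def _lookup_calculation(self, name, key):
        return self.root._lookup_dict.get(name, {}).get(key, _NO_CALCULATION)

    def _store_calculation(self, name, key, value):
        self.root._lookup_dict.setdefault(name, {})[key] = value

    def is_descendant_of(self, other):
        cached = self._lookup_calculation("is_descendant_of", (self, other))
        if cached is not _NO_CALCULATION:
            return cached
        node, result = self.parent, False
        while node:
            if node is other:
                result = True
                break
            node = node.parent
        self._store_calculation("is_descendant_of", (self, other), result)
        return result

leaf = Node()
root = Node([Node([leaf])])
assert leaf.is_descendant_of(root)   # walks the tree once
assert leaf.is_descendant_of(root)   # answered from the root's cache

Keeping the cache on the root gives a single invalidation point for the whole tree, which is what invalidate_calculations exploits.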
from contextlib import contextmanager import sys from tempfile import NamedTemporaryFile try: from unittest import mock except ImportError: import mock import pytest import pipdeptree as p # Tests for DAG classes def mock_pkgs(simple_graph): for node, children in simple_graph.items(): nk, nv = node p = mock.Mock(key=nk, project_name=nk, version=nv) as_req = mock.Mock(key=nk, project_name=nk, specs=[('==', nv)]) p.as_requirement = mock.Mock(return_value=as_req) reqs = [] for child in children: ck, cv = child r = mock.Mock(key=ck, project_name=ck, specs=cv) reqs.append(r) p.requires = mock.Mock(return_value=reqs) yield p def mock_PackageDAG(simple_graph): pkgs = list(mock_pkgs(simple_graph)) return p.PackageDAG.from_pkgs(pkgs) # util for comparing tree contents with a simple graph def dag_to_dict(g): return {k.key: [v.key for v in vs] for k, vs in g._obj.items()} def sort_map_values(m): return {k: sorted(v) for k, v in m.items()} t = mock_PackageDAG({ ('a', '3.4.0'): [('b', [('>=', '2.0.0')]), ('c', [('>=', '5.7.1')])], ('b', '2.3.1'): [('d', [('>=', '2.30'), ('<', '2.42')])], ('c', '5.10.0'): [('d', [('>=', '2.30')]), ('e', [('>=', '0.12.1')])], ('d', '2.35'): [('e', [('>=', '0.9.0')])], ('e', '0.12.1'): [], ('f', '3.1'): [('b', [('>=', '2.1.0')])], ('g', '6.8.3rc1'): [('e', [('>=', '0.9.0')]), ('f', [('>=', '3.0.0')])] }) def test_PackageDAG__get_node_as_parent(): assert 'b' == t.get_node_as_parent('b').key assert 'c' == t.get_node_as_parent('c').key def test_PackageDAG_filter(): # When both show_only and exclude are not specified, same tree # object is returned assert t.filter(None, None) is t # when show_only is specified g1 = dag_to_dict(t.filter(set(['a', 'd']), None)) expected = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d', 'e'], 'd': ['e'], 'e': []} assert expected == g1 # when exclude is specified g2 = dag_to_dict(t.filter(None, ['d'])) expected = {'a': ['b', 'c'], 'b': [], 'c': ['e'], 'e': [], 'f': ['b'], 'g': ['e', 'f']} assert expected == g2 # when both show_only and exclude are specified g3 = dag_to_dict(t.filter(set(['a', 'g']), set(['d', 'e']))) expected = {'a': ['b', 'c'], 'b': [], 'c': [], 'f': ['b'], 'g': ['f']} assert expected == g3 # when conflicting values in show_only and exclude, AssertionError # is raised with pytest.raises(AssertionError): dag_to_dict(t.filter(set(['d']), set(['D', 'e']))) def test_PackageDAG_reverse(): t1 = t.reverse() expected = {'a': [], 'b': ['a', 'f'], 'c': ['a'], 'd': ['b', 'c'], 'e': ['c', 'd', 'g'], 'f': ['g'], 'g': []} assert isinstance(t1, p.ReversedPackageDAG) assert sort_map_values(expected) == sort_map_values(dag_to_dict(t1)) assert all([isinstance(k, p.ReqPackage) for k in t1.keys()]) assert all([isinstance(v, p.DistPackage) for v in p.flatten(t1.values())]) # testing reversal of ReversedPackageDAG instance expected = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d', 'e'], 'd': ['e'], 'e': [], 'f': ['b'], 'g': ['e', 'f']} t2 = t1.reverse() assert isinstance(t2, p.PackageDAG) assert sort_map_values(expected) == sort_map_values(dag_to_dict(t2)) assert all([isinstance(k, p.DistPackage) for k in t2.keys()]) assert all([isinstance(v, p.ReqPackage) for v in p.flatten(t2.values())]) # Tests for Package classes # # Note: For all render methods, we are only testing for frozen=False # as mocks with frozen=True are a lot more complicated def test_DistPackage__render_as_root(): foo = mock.Mock(key='foo', project_name='foo', version='20.4.1') dp = p.DistPackage(foo) is_frozen = False assert 'foo==20.4.1' == dp.render_as_root(is_frozen) def
test_DistPackage__render_as_branch(): foo = mock.Mock(key='foo', project_name='foo', version='20.4.1') bar = mock.Mock(key='bar', project_name='bar', version='4.1.0') bar_req = mock.Mock(key='bar', project_name='bar', version='4.1.0', specs=[('>=', '4.0')]) rp = p.ReqPackage(bar_req, dist=bar) dp = p.DistPackage(foo).as_parent_of(rp) is_frozen = False assert 'foo==20.4.1 [requires: bar>=4.0]' == dp.render_as_branch(is_frozen) def test_DistPackage__as_parent_of(): foo = mock.Mock(key='foo', project_name='foo', version='20.4.1') dp = p.DistPackage(foo) assert dp.req is None bar = mock.Mock(key='bar', project_name='bar', version='4.1.0') bar_req = mock.Mock(key='bar', project_name='bar', version='4.1.0', specs=[('>=', '4.0')]) rp = p.ReqPackage(bar_req, dist=bar) dp1 = dp.as_parent_of(rp) assert dp1._obj == dp._obj assert dp1.req is rp dp2 = dp.as_parent_of(None) assert dp2 is dp def test_DistPackage__as_dict(): foo = mock.Mock(key='foo', project_name='foo', version='1.3.2b1') dp = p.DistPackage(foo) result = dp.as_dict() expected = {'key': 'foo', 'package_name': 'foo', 'installed_version': '1.3.2b1'} assert expected == result def test_ReqPackage__render_as_root(): bar = mock.Mock(key='bar', project_name='bar', version='4.1.0') bar_req = mock.Mock(key='bar', project_name='bar', version='4.1.0', specs=[('>=', '4.0')]) rp = p.ReqPackage(bar_req, dist=bar) is_frozen = False assert 'bar==4.1.0' == rp.render_as_root(is_frozen) def test_ReqPackage__render_as_branch(): bar = mock.Mock(key='bar', project_name='bar', version='4.1.0') bar_req = mock.Mock(key='bar', project_name='bar', version='4.1.0', specs=[('>=', '4.0')]) rp = p.ReqPackage(bar_req, dist=bar) is_frozen = False assert 'bar [required: >=4.0, installed: 4.1.0]' == rp.render_as_branch(is_frozen) def test_ReqPackage__as_dict(): bar = mock.Mock(key='bar', project_name='bar', version='4.1.0') bar_req = mock.Mock(key='bar', project_name='bar', version='4.1.0', specs=[('>=', '4.0')]) rp = p.ReqPackage(bar_req, dist=bar) result = rp.as_dict() expected = {'key': 'bar', 'package_name': 'bar', 'installed_version': '4.1.0', 'required_version': '>=4.0'} assert expected == result # Tests for render_text # # @NOTE: These tests use mocked tree and it's not easy to test for # frozen=True with mocks. Hence those tests are covered only in # end-to-end tests. Check the ./e2e-tests script. 
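test_PackageDAG_reverse above expects reversal to turn the {package: dependencies} adjacency into {dependency: dependents}, keeping every key present. A minimal sketch of that inversion on plain dicts; reverse_adjacency is an illustrative helper, not part of pipdeptree's API.

def reverse_adjacency(graph):
    """Invert {package: [dependencies]} into {dependency: [dependents]}."""
    reversed_graph = {k: [] for k in graph}
    for parent, children in graph.items():
        for child in children:
            reversed_graph.setdefault(child, []).append(parent)
    return reversed_graph

g = {'a': ['b', 'c'], 'b': ['d'], 'c': ['d', 'e'],
     'd': ['e'], 'e': [], 'f': ['b'], 'g': ['e', 'f']}
r = reverse_adjacency(g)
assert sorted(r['e']) == ['c', 'd', 'g']   # matches the expected reverse above
assert r['a'] == []                        # nothing depends on 'a'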
@pytest.mark.parametrize( "list_all,reverse,expected_output", [ ( True, False, [ 'a==3.4.0', ' - b [required: >=2.0.0, installed: 2.3.1]', ' - d [required: >=2.30,<2.42, installed: 2.35]', ' - e [required: >=0.9.0, installed: 0.12.1]', ' - c [required: >=5.7.1, installed: 5.10.0]', ' - d [required: >=2.30, installed: 2.35]', ' - e [required: >=0.9.0, installed: 0.12.1]', ' - e [required: >=0.12.1, installed: 0.12.1]', 'b==2.3.1', ' - d [required: >=2.30,<2.42, installed: 2.35]', ' - e [required: >=0.9.0, installed: 0.12.1]', 'c==5.10.0', ' - d [required: >=2.30, installed: 2.35]', ' - e [required: >=0.9.0, installed: 0.12.1]', ' - e [required: >=0.12.1, installed: 0.12.1]', 'd==2.35', ' - e [required: >=0.9.0, installed: 0.12.1]', 'e==0.12.1', 'f==3.1', ' - b [required: >=2.1.0, installed: 2.3.1]', ' - d [required: >=2.30,<2.42, installed: 2.35]', ' - e [required: >=0.9.0, installed: 0.12.1]', 'g==6.8.3rc1', ' - e [required: >=0.9.0, installed: 0.12.1]', ' - f [required: >=3.0.0, installed: 3.1]', ' - b [required: >=2.1.0, installed: 2.3.1]', ' - d [required: >=2.30,<2.42, installed: 2.35]', ' - e [required: >=0.9.0, installed: 0.12.1]' ] ), ( True, True, [ 'a==3.4.0', 'b==2.3.1', ' - a==3.4.0 [requires: b>=2.0.0]', ' - f==3.1 [requires: b>=2.1.0]', ' - g==6.8.3rc1 [requires: f>=3.0.0]', 'c==5.10.0', ' - a==3.4.0 [requires: c>=5.7.1]', 'd==2.35', ' - b==2.3.1 [requires: d>=2.30,<2.42]', ' - a==3.4.0 [requires: b>=2.0.0]', ' - f==3.1 [requires: b>=2.1.0]', ' - g==6.8.3rc1 [requires: f>=3.0.0]', ' - c==5.10.0 [requires: d>=2.30]', ' - a==3.4.0 [requires: c>=5.7.1]', 'e==0.12.1', ' - c==5.10.0 [requires: e>=0.12.1]', ' - a==3.4.0 [requires: c>=5.7.1]', ' - d==2.35 [requires: e>=0.9.0]', ' - b==2.3.1 [requires: d>=2.30,<2.42]', ' - a==3.4.0 [requires: b>=2.0.0]', ' - f==3.1 [requires: b>=2.1.0]', ' - g==6.8.3rc1 [requires: f>=3.0.0]', ' - c==5.10.0 [requires: d>=2.30]', ' - a==3.4.0 [requires: c>=5.7.1]', ' - g==6.8.3rc1 [requires: e>=0.9.0]', 'f==3.1', ' - g==6.8.3rc1 [requires: f>=3.0.0]', 'g==6.8.3rc1' ] ), ( False, False, [ 'a==3.4.0', ' - b [required: >=2.0.0, installed: 2.3.1]', ' - d [required: >=2.30,<2.42, installed: 2.35]', ' - e [required: >=0.9.0, installed: 0.12.1]', ' - c [required: >=5.7.1, installed: 5.10.0]', ' - d [required: >=2.30, installed: 2.35]', ' - e [required: >=0.9.0, installed: 0.12.1]', ' - e [required: >=0.12.1, installed: 0.12.1]', 'g==6.8.3rc1', ' - e [required: >=0.9.0, installed: 0.12.1]', ' - f [required: >=3.0.0, installed: 3.1]', ' - b [required: >=2.1.0, installed: 2.3.1]', ' - d [required: >=2.30,<2.42, installed: 2.35]', ' - e [required: >=0.9.0, installed: 0.12.1]', ] ), ( False, True, [ 'e==0.12.1', ' - c==5.10.0 [requires: e>=0.12.1]', ' - a==3.4.0 [requires: c>=5.7.1]', ' - d==2.35 [requires: e>=0.9.0]', ' - b==2.3.1 [requires: d>=2.30,<2.42]', ' - a==3.4.0 [requires: b>=2.0.0]', ' - f==3.1 [requires: b>=2.1.0]', ' - g==6.8.3rc1 [requires: f>=3.0.0]', ' - c==5.10.0 [requires: d>=2.30]', ' - a==3.4.0 [requires: c>=5.7.1]', ' - g==6.8.3rc1 [requires: e>=0.9.0]', ] ) ] ) def test_render_text(capsys, list_all, reverse, expected_output): tree = t.reverse() if reverse else t p.render_text(tree, list_all=list_all, frozen=False) captured = capsys.readouterr() assert '\n'.join(expected_output).strip() == captured.out.strip() # Tests for graph outputs def test_render_pdf(): output = p.dump_graphviz(t, output_format='pdf') @contextmanager def redirect_stdout(new_target): old_target, sys.stdout = sys.stdout, new_target try: yield new_target finally: sys.stdout = 
old_target with NamedTemporaryFile(delete=True) as f: with redirect_stdout(f): p.print_graphviz(output) rf = open(f.name, 'rb') assert b'%PDF' == rf.read()[:4] # @NOTE: rf is not closed to avoid "bad filedescriptor" error def test_render_svg(capsys): output = p.dump_graphviz(t, output_format='svg') p.print_graphviz(output) out, _ = capsys.readouterr() assert out.startswith('<?xml') assert '<svg' in out assert out.strip().endswith('</svg>') # Test for conflicting deps @pytest.mark.parametrize( "mpkgs,expected_keys,expected_output", [ ( { ('a', '1.0.1'): [('b', [('>=', '2.3.0')])], ('b', '1.9.1'): [] }, {'a': ['b']}, [ 'Warning!!! Possibly conflicting dependencies found:', '* a==1.0.1', ' - b [required: >=2.3.0, installed: 1.9.1]' ] ), ( { ('a', '1.0.1'): [('c', [('>=', '9.4.1')])], ('b', '2.3.0'): [('c', [('>=', '7.0')])], ('c', '8.0.1'): [] }, {'a': ['c']}, [ 'Warning!!! Possibly conflicting dependencies found:', '* a==1.0.1', ' - c [required: >=9.4.1, installed: 8.0.1]' ] ), ( { ('a', '1.0.1'): [('c', [('>=', '9.4.1')])], ('b', '2.3.0'): [('c', [('>=', '9.4.0')])] }, {'a': ['c'], 'b': ['c']}, [ 'Warning!!! Possibly conflicting dependencies found:', '* a==1.0.1', ' - c [required: >=9.4.1, installed: ?]', '* b==2.3.0', ' - c [required: >=9.4.0, installed: ?]' ] ), ( { ('a',
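The conflicting-deps cases above treat a requirement as possibly conflicting when the installed version fails the requirement's specifiers, or when the package is missing entirely. A rough sketch of that check under a deliberately naive version parser (dotted integers only; versions such as '6.8.3rc1' need packaging.version in real code).

import operator

OPS = {'>=': operator.ge, '>': operator.gt, '<=': operator.le,
       '<': operator.lt, '==': operator.eq}

def parse_version(v):
    # Naive: assumes purely numeric dotted versions.
    return tuple(int(part) for part in v.split('.'))

def is_conflicting(installed, specs):
    """specs is a list of (op, version) pairs, e.g. [('>=', '2.3.0')]."""
    if installed is None:
        return True  # required but not installed at all
    return not all(OPS[op](parse_version(installed), parse_version(req))
                   for op, req in specs)

assert is_conflicting('1.9.1', [('>=', '2.3.0')])    # too old, conflict
assert not is_conflicting('8.0.1', [('>=', '7.0')])  # satisfied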
""" Script to create word2vec models, given a set of mapped POIs. """ # Authors: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> import argparse import os import math import errno import pandas as pd import geopandas as gpd from geopandas import GeoDataFrame from shapely.geometry import Point import sys sys.path.append("../GeoL") import numpy as np import seaborn as sns sns.set_style("ticks") sns.set_context("paper") import sklearn from sklearn import preprocessing from sklearn.model_selection import GridSearchCV # Perforing grid search from sklearn.metrics import confusion_matrix import sklearn.metrics as metrics import joblib import xgboost as xgb from xgboost.sklearn import XGBClassifier from matplotlib.pylab import rcParams rcParams['figure.figsize'] = 15, 4 def printEvalutationMetrics(df_y_test, y_pred): print(metrics.classification_report(df_y_test.values, y_pred)) print("ACCURACY: {}".format(metrics.accuracy_score(df_y_test.values, y_pred))) print("F1 SCORE: {}".format(metrics.f1_score( df_y_test.values, y_pred, average='macro'))) def runExperiment(df_train, df_test, CITY_NAME, SIZE, BASE_DIR_CITY, SIZE1, METRIC, S, WS, C): OUTPUT_PATH = os.path.join(BASE_DIR_CITY, "train") OUTPUT_FILENAME = os.path.join( OUTPUT_PATH, "metrics_s" + str(S) + "_ws" + str(WS) + "_c"+str(C)+".txt") dfs = [] dim = 200 df = {} # {"area": boro, "cell": dim} suffix_train = "General" suffix_test = "General" df_y_train = df_train['t_predominant'] df_y_test = df_test['t_predominant'] # Baseline df_train['t_predominant'].value_counts().max() y_pred = [df_train['t_predominant'].value_counts().idxmax()] * \ len(df_y_test) print("*****************************" + CITY_NAME + " "+str(SIZE)+"*********************************") print("****** BASELINE ******") # Print Metrics printEvalutationMetrics(df_y_test, y_pred) df['model'] = "baseline_"+METRIC + "_s" + \ str(S) + "_ws" + str(WS) + "_c"+str(C) # metrics.accuracy_score(df_y_test.values, y_pred) df['accuracy'] = metrics.accuracy_score(df_y_test.values, y_pred) # metrics.accuracy_score(df_y_test.values, y_pred) df['f1-score'] = metrics.f1_score(df_y_test.values, y_pred, average='macro') # metrics.accuracy_score(df_y_test.values, y_pred) df['precision'] = metrics.precision_score( df_y_test.values, y_pred, average='macro') # metrics.accuracy_score(df_y_test.values, y_pred) df['recall'] = metrics.recall_score( df_y_test.values, y_pred, average='macro') dfs.append(df) print("**********************") # # xgboost Classifier df = {} print("****** XGBOOST ******") df_X_train = df_train[[c for c in df_train.columns if c.startswith('f_')]] df_X_test = df_test[[c for c in df_test.columns if c.startswith('f_')]] # colsample_bytree=0.8, scale_pos_weight=1, learning_rate=0.1, min_child_weight=5,n_estimators=177, subsample=0.8, max_depth=3, gamma=0) clf = xgboost.XGBClassifier() clf.fit(df_X_train.as_matrix(), df_y_train.values.ravel()) y_pred = clf.predict(df_X_test.as_matrix()) # Print Metrics printEvalutationMetrics(df_y_test, y_pred) df['model'] = 'GBT_' + METRIC + "_s" + \ str(S) + "_ws" + str(WS) + "_c"+str(C) # metrics.accuracy_score(df_y_test.values, y_pred) df['accuracy'] = metrics.accuracy_score(df_y_test.values, y_pred) # metrics.accuracy_score(df_y_test.values, y_pred) df['f1-score'] = metrics.f1_score(df_y_test.values, y_pred, average='macro') # metrics.accuracy_score(df_y_test.values, y_pred) df['precision'] = metrics.precision_score( df_y_test.values, y_pred, average='macro') # metrics.accuracy_score(df_y_test.values, y_pred) df['recall'] = metrics.recall_score( 
df_y_test.values, y_pred, average='macro') dfs.append(df) print(dfs) df = pd.DataFrame(dfs) print(df.head()) with open(OUTPUT_FILENAME, 'a') as f: # Already has column names if (os.stat(OUTPUT_FILENAME).st_size > 0): df.to_csv(f, header=False, sep='\t') else: df.to_csv(f, header=True, sep='\t') print('********* CONFUSION MATRIX *******************') print(confusion_matrix(df_y_test.values, y_pred)) print("********************************************************************************") # --------------------------- this functions serve for param estimation ------------------------------------ def modelfit(model, X, y, useTrainCV=True, cv_folds=5, early_stopping_rounds=50, verbose=False): if useTrainCV: xgb_param = model.get_xgb_params() xgtrain = xgb.DMatrix(X.values, label=y) cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=model.get_params()['n_estimators'], nfold=cv_folds, metrics='merror', early_stopping_rounds=early_stopping_rounds) # verbose_eval=True)#show_progress=True) model.set_params(n_estimators=cvresult.shape[0]) # Fit the algorithm on the data model.fit(X, y, eval_metric='merror') if verbose: score, predictions = evaluate(model, X, y) print("Score: %f" % score) def tune(X, y, param_test, verbose=0, learning_rate=0.1, n_estimators=140, max_depth=5, min_child_weight=1, gamma=0, subsample=0.8, colsample_bytree=0.8, scale_pos_weight=1, reg_alpha=0, seed=28, cv=5): gsearch = GridSearchCV( estimator=XGBClassifier(max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators, silent=True, objective='multi:softmax', booster='gbtree', n_jobs=1, nthread=1, gamma=gamma, min_child_weight=min_child_weight, max_delta_step=0, subsample=subsample, colsample_bytree=colsample_bytree, colsample_bylevel=1, reg_alpha=reg_alpha, reg_lambda=1, scale_pos_weight=scale_pos_weight, base_score=0.5, random_state=0, seed=seed, missing=None), param_grid=param_test, scoring='f1_macro', n_jobs=2, iid=False, cv=cv, verbose=verbose) gsearch.fit(X, y) return gsearch.best_estimator_, gsearch.grid_scores_, gsearch.best_params_, gsearch.best_score_ # return gsearch.best_estimator_, gsearch.cv_results_, gsearch.best_params_, gsearch.best_score_ def evaluate(model, X_test, y_test): # transform back encoded labels to strings ,e.g.:"Industrial" predictions = model.predict(X_test) return sklearn.metrics.f1_score(y_test, predictions, average="macro"), predictions def train_test(params, X_train, y_train, X_test, y_test, seed, verbose=False): num_class = len(np.unique(y_train)) model = XGBClassifier(objective='multi:softmax', num_class=num_class, seed=seed) model.set_params(**params) # Train and test the model modelfit(model, X_train, y_train, verbose=verbose) score, predictions = evaluate(model, X_test, y_test) return model, score, predictions # --------------------------- END param estimation ------------------------------------ # TUNING AND TESTING def build_model_and_tune(tuning, params, X_train, y_train, seed, verbose=True): # Best score and update of the parameters def tune_and_update(param_test, parameters): best_estimator, grid_scores, best_params, best_score = tune(X_train, y_train, param_test, seed=seed, **parameters) if best_score >= tune_and_update.score: tune_and_update.score = best_score params.update(best_params) tuning.append((parameters.copy(), best_score)) return best_score tune_and_update.score = float('-inf') # Build a model with initial parameters #alg, f1_score, predictions = test_param(params, X_train, y_train, X_test, y_test, seed, verbose=verbose > 1) #if verbose > 0: # 
print('Primo modello\tTesting rmse = ' + str(f1_score) + '\n') #testing.append((params.copy(), f1_score)) # Tuning of the parameters params['n_estimators'] = 140 param_test1 = { 'max_depth': list(range(3, 10, 2)), 'min_child_weight': list(range(1, 6, 2)) } sc = tune_and_update(param_test1, params) if verbose > 0: print('Tuning 1\tScore = ' + str(sc)) param_test2 = { 'max_depth': [params['max_depth'] + k for k in [-1, 0, 1] if params['max_depth'] + k > 0], 'min_child_weight': [params['min_child_weight'] + k for k in [-1, 0, 1] if params['min_child_weight'] + k > 0] } sc = tune_and_update(param_test2, params) if verbose > 0: print('Tuning 2\tScore = ' + str(sc)) param_test2b = {'min_child_weight': [6, 8, 10, 12]} sc = tune_and_update(param_test2b, params) if verbose > 0: print('Tuning 2b\tScore = ' + str(sc)) param_test3 = {'gamma': [i/10.0 for i in range(0, 5)]} sc = tune_and_update(param_test3, params) if verbose > 0: print('Tuning 3\tScore = ' + str(sc)) params['n_estimators'] = 177 param_test4 = { 'subsample': [i/10.0 for i in range(6, 10)], 'colsample_bytree': [i/10.0 for i in range(6, 10)] } sc = tune_and_update(param_test4, params) if verbose > 0: print('Tuning 4\tScore = ' + str(sc)) ss = int(params['subsample']*100) csbt = int(params['colsample_bytree']*100) param_test5 = { 'subsample': [i/100.0 for i in range(max(0, ss-10), ss+5, 5)], 'colsample_bytree': [i/100.0 for i in range(max(0, csbt-10), csbt+5, 5)] } sc = tune_and_update(param_test5, params) if verbose > 0: print('Tuning 5\tScore = ' + str(sc)) param_test6 = { 'reg_alpha': [1e-5, 1e-2, 0.1, 1, 100] } sc = tune_and_update(param_test6, params) if verbose > 0: print('Tuning 6\tScore = ' + str(sc)) if 'reg_alpha' in params: a = math.log10(params['reg_alpha']) else: a = 0 param_test7 = { 'reg_alpha': [0] + np.logspace(a-2, a+1, num=4) # [0, 0.001, 0.005, 0.01, 0.05] } sc = tune_and_update(param_test7, params) if verbose > 0: print('Tuning 7\tScore = ' + str(sc)) param_test8 = { 'n_estimators': [10, 100, 1000, 3000], 'learning_rate': [0.005, 0.01, 0.05, 0.1] } sc = tune_and_update(param_test8, params) if verbose > 0: print('Tuning 8\tScore = ' + str(sc)) n = math.log10(params['n_estimators']) l = math.log10(params['learning_rate']) param_test9 = { 'n_estimators': [int(x) for x in np.logspace(min(1, n-1), n+1, num=3)], 'learning_rate': np.logspace(l-1, l+1, num=3) } sc = tune_and_update(param_test9, params) if verbose > 0: print('Tuning 9\tScore = ' + str(sc)) return params, tuning # NO TUNING JUST TRAIN+TESTING def build_model(params, X_train, y_train, X_test, y_test, seed, verbose=1): model, score, predictions = train_test(params, X_train, y_train, X_test, y_test, seed, verbose=verbose > 1) return model, predictions, score # ----------------------------------------------------------------------------------------------------------- def main(argv): parser = argparse.ArgumentParser('Run XGBOOST on Cellvector embeddings') parser.add_argument('-itr', '--input-train', help='Input train', action='store', dest='input_train', required=True, type=str) parser.add_argument('-ite', '--input_test', help='Input test', action='store', dest='input_test', required=True, type=str) parser.add_argument('-dm', '--directory-model', help='Directory to store outputted model', action='store', dest='directory_model', required=True, type=str) parser.add_argument('-dp', '--directory-predictions', help='Directory to store outputted predictions', action='store', dest='directory_predictions', required=True, type=str) parser.add_argument('-t', '--tuning', 
help='Enable XGB parameter tuning. Disabled by default', dest='enable_tuning', action='store_true', default=False) args = parser.parse_args() model_path = os.path.join(args.directory_model, 'test.model') pred_path = os.path.join(args.directory_predictions, 'pred.dat') # Load TRAIN data df_train = pd.read_csv(args.input_train, sep="\t") # Load TEST data df_test = pd.read_csv(args.input_test, sep="\t") le = preprocessing.LabelEncoder() labels = le.fit(df_train["target"].values.ravel()) df_train["encoded_target"] = labels.transform(df_train["target"].values.ravel()) df_test["encoded_target"] = labels.transform(df_test["target"].values.ravel()) # Create Train/Test from dataframe X_train = df_train[[c for c in df_train.columns if c.startswith("f_")]] y_train = df_train["encoded_target"].values.ravel() X_test = df_test[[c for c in df_test.columns if c.startswith("f_")]] y_test = df_test["encoded_target"].values.ravel() # Check data and Train/Test proportions print("X_train", len(X_train.values)) print("y_train", len(y_train)) print("X_test", len(X_test.values)) print("y_test", len(y_test)) print("X_train proportions: ", len(X_train.values) / (len(X_train.values)+len(X_test.values)) * 100) print("X_test proportions: ", len(X_test.values) / (len(X_train.values)+len(X_test.values)) * 100) print("y_train proportions: ", len(y_train) / (len(y_train)+len(y_test)) * 100) print("y_test proportions: ", len(y_test) / (len(y_train)+len(y_test)) * 100) # Initialize variable for later use tuning = [] # Initialize model parameters params = {} params['learning_rate'] = 0.1 params['n_estimators'] = 1000 params['max_depth'] = 5 params['min_child_weight'] = 1 params['gamma'] = 0 params['subsample'] = 0.8 params['colsample_bytree'] = 0.8 params['scale_pos_weight'] = 1 # If tuning is enabled, search for better parameters first if args.enable_tuning: params, tuning = build_model_and_tune(tuning, params, X_train, y_train, 27) # Train with the (possibly tuned) parameters and evaluate on the test set print('\tFinal model evaluation:') model, score, predictions = train_test(params, X_train, y_train, X_test, y_test, 27) # save_model(alg, args.directory_model) joblib.dump(model, model_path) # save predictions pred_series = pd.Series(le.inverse_transform(predictions)) pred_series.to_csv(pred_path, index=None, header=False) # print('\t\tTesting rmse = ') print("----TUNING----\n") print(tuning) # TODO: it was like this before, I changed it below, is that OK? data = X.merge(Y, on=keys) # but you also need to iterate over targets???? #data = pd.concat([X_train, y_train]) #std = data.std() #m = data.min() #M = data.max() #print('\t\tdata range = ' +
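build_model_and_tune above runs a staged search: a coarse grid first, then progressively finer grids centered on the previous winner, with params carrying the best values forward between stages. A compact, runnable sketch of that loop; scikit-learn's GradientBoostingClassifier stands in for XGBClassifier here so the example does not require xgboost.

from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV

X, y = make_classification(n_samples=200, n_features=8, random_state=0)
params = {'max_depth': 3, 'min_samples_leaf': 1}

def tune_and_update(grid):
    search = GridSearchCV(GradientBoostingClassifier(random_state=0, **params),
                          param_grid=grid, scoring='f1_macro', cv=3)
    search.fit(X, y)
    params.update(search.best_params_)   # later stages start from the winner
    return search.best_score_

# Stage 1: coarse sweep. Stage 2: refine around the stage-1 winner.
tune_and_update({'max_depth': [3, 5, 7]})
best = tune_and_update({'max_depth': [params['max_depth'] + k for k in (-1, 0, 1)
                                      if params['max_depth'] + k > 0]})
print(params, best)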
# Twisted Imports from twisted.internet import reactor, defer, task from twisted.python import log # Octopus Imports from octopus.runtime.sequence.util import Runnable, Pausable, Cancellable, BaseStep from octopus.runtime.sequence.error import NotRunning, AlreadyRunning, NotPaused from octopus.constants import State from octopus.data.data import BaseVariable from octopus.machine import Component # Package Imports from ..util import EventEmitter # Debugging defer.Deferred.debug = True def populate_blocks (): def subclasses (cls): return cls.__subclasses__() + [ g for s in cls.__subclasses__() for g in subclasses(s) ] from .blocks import mathematics, text, logic, controls, variables, machines, dependents, images, colour Workspace.blocks = { c.__name__: c for c in subclasses(Block) } class Workspace (Runnable, Pausable, Cancellable, EventEmitter): blocks = {} def __init__ (self): self.state = State.READY self.allBlocks = {} self.topBlocks = {} self.variables = Variables() def addBlock (self, id, type, fields = None, x = 0, y = 0): try: blockType = type blockClass = self.blocks[blockType] except KeyError: raise Exception("Unknown Block: %s" % blockType) block = blockClass(self, id) block.position = [x, y] try: for field, value in fields.items(): block.fields[field] = value except AttributeError: pass block.created() self.allBlocks[block.id] = block self.topBlocks[block.id] = block self.emit('top-block-added', block = block) def getBlock (self, id): try: return self.allBlocks[id] except KeyError: print("Attempted to access unconnected block {:s}".format(str(id))) raise def removeBlock (self, id): block = self.getBlock(id) try: del self.topBlocks[block.id] except KeyError: pass # Disconnect prevBlock connection prev = block.prevBlock if prev is not None: if prev.nextBlock == block: prev.disconnectNextBlock(block) else: prevInputs = prev.inputs for input in prevInputs.keys(): if prevInputs[input] is block: prev.disconnectInput(input, "value") # Disconnect nextBlock connection next = block.nextBlock if next is not None: if next.prevBlock == block: block.disconnectNextBlock(next) # Disconnect output connection output = block.outputBlock if output is not None: outputInputs = output.inputs for input in outputInputs.keys(): if outputInputs[input] is block: output.disconnectInput(input, "value") try: del self.allBlocks[block.id] except KeyError: pass self.emit('top-block-removed', block = block) block.disposed() def connectBlock (self, id, parent, connection, input = None): childBlock = self.getBlock(id) parentBlock = self.getBlock(parent) if id in self.topBlocks: del self.topBlocks[id] if connection == "input-value": parentBlock.connectInput(input, childBlock, "value") elif connection == "input-statement": parentBlock.connectInput(input, childBlock, "statement") elif connection == "previous": parentBlock.connectNextBlock(childBlock) self.emit('top-block-removed', block = childBlock) def disconnectBlock (self, id, parent, connection, input = None): childBlock = self.getBlock(id) parentBlock = self.getBlock(parent) self.topBlocks[id] = childBlock if connection == "input-value": parentBlock.disconnectInput(input, "value") elif connection == "input-statement": parentBlock.disconnectInput(input, "statement") elif connection == "previous": parentBlock.disconnectNextBlock(childBlock) self.emit('top-block-added', block = childBlock) # # Controls # def _run (self): self._complete = defer.Deferred() dependencyGraph = [] runningBlocks = set()
externalStopBlocks = set() resumeBlocks = [] self.emit("workspace-started") def _runBlock (block): if self.state is State.PAUSED: self._onResume = _onResume resumeBlocks.append(block) return if block.externalStop: externalStopBlocks.add(block) else: runningBlocks.add(block) # Run in the next tick so that dependency graph # and runningBlocks are all updated before blocks # are run (and potentially finish) d = task.deferLater(reactor, 0, block.run) d.addCallbacks( callback = _blockComplete, callbackArgs = [block], errback = _blockError, errbackArgs = [block] ) d.addErrback(log.err) def _onResume (): for block in resumeBlocks: _runBlock(block) # Clear in place: rebinding resumeBlocks here would make it a # local variable and raise UnboundLocalError. del resumeBlocks[:] def _blockComplete (result, block): if block.externalStop: return runningBlocks.discard(block) decls = block.getGlobalDeclarationNames() # Check if any other blocks can be run toRun = [] for item in dependencyGraph: for decl in decls: item["deps"].discard(decl) if len(item["deps"]) == 0: toRun.append(item) # _runBlock needs to be called in the next tick (done in _runBlock) # so that the dependency graph is updated before any new blocks run. for item in toRun: dependencyGraph.remove(item) item["block"].off("connectivity-change", item["onConnectivityChange"]) _runBlock(item["block"]) # Check if the experiment can be finished reactor.callLater(0, _checkFinished) def _blockError (failure, block): if failure.type is Disconnected: return _blockComplete(None, block) # If any one step fails, cancel the rest. if not _blockError.called: log.msg("Received error %s from block %s. Aborting." % (failure, block.id)) def _errback (error): # Pass the error if this is called as errback, or else # the original failure if abort() had no errors. # Call later to try to allow any other block-state events # to propagate before the listeners are cancelled. if not self._complete.called: _externalStop() self.state = State.ERROR reactor.callLater(0, self._complete.errback, error or failure) self.emit("workspace-stopped") _blockError.called = True try: self.abort().addBoth(_errback) except NotRunning: pass # Allow access to called within scope of _blockError _blockError.called = False def _updateDependencyGraph (data = None, block = None): toRemove = [] for item in dependencyGraph: if block is not None and item['block'] is not block: continue # If a block is no longer a top block, remove it # from the dependency graph if item['block'].prevBlock is not None: toRemove.append(item) continue # Update dependency list item['deps'] = set(item['block'].getUnmatchedVariableNames()) for item in toRemove: item['block'].off('connectivity-change', item['onConnectivityChange']) dependencyGraph.remove(item) # When a new top block is added, add it to the list of blocks that must # complete before the run can be finished; or to the list of blocks that # must be stopped when the run finishes, if appropriate. @self.on('top-block-added') def onTopBlockAdded (data): block = data['block'] if block._complete is not None and block._complete.called is False: if block.externalStop: externalStopBlocks.add(block) else: runningBlocks.add(block) block._complete.addCallbacks( callback = _blockComplete, callbackArgs = [block], errback = _blockError, errbackArgs = [block] ).addErrback(log.err) _updateDependencyGraph() self.on('top-block-removed', _updateDependencyGraph) # If there are no more running blocks, stop running.
def _checkFinished (error = None): log.msg("Finished?: Waiting for %s blocks" % len(runningBlocks)) if len(runningBlocks) > 0: return log.msg("Skipped blocks:" + str(dependencyGraph)) if not (_blockError.called or self._complete.called): _externalStop() self.state = State.COMPLETE self._complete.callback(None) _removeListeners() def _removeListeners (): self.emit("workspace-stopped") self.off('top-block-added', onTopBlockAdded) self.off('top-block-removed', _updateDependencyGraph) for item in dependencyGraph: item['block'].off('connectivity-change', item['onConnectivityChange']) # Cancel all blocks which must be stopped externally. def _externalStop (): for block in externalStopBlocks: try: block.cancel(propagate = True).addErrback(log.err) except NotRunning: pass # Set up the dependency graph allDeclaredGlobalVariables = set() blocksToRunImmediately = [] dependencyError = False # Create a list of all global variables defined in the workspace for block in self.topBlocks.values(): allDeclaredGlobalVariables.update(block.getGlobalDeclarationNames()) def _generateOnConnectivityChange (block): def onConnectivityChange (data): _updateDependencyGraph(block = block) return onConnectivityChange # Defer blocks with dependencies until these have been met. for block in self.topBlocks.values(): deps = set(block.getUnmatchedVariableNames()) # Check that all of these dependencies will be met. for dep in deps: if dep not in allDeclaredGlobalVariables: self.emit( "log-message", level = "error", message = "Referenced variable {:s} is never defined. ".format(dep), block = block.id ) dependencyError = True if len(deps) is 0: log.msg("Block %s has no deps, running now" % block.id) blocksToRunImmediately.append(block) else: log.msg("Block %s waiting for %s" % (block.id, deps)) onConnectivityChange = _generateOnConnectivityChange(block) block.on("connectivity-change", onConnectivityChange) dependencyGraph.append({ "block": block, "deps": deps, "onConnectivityChange": onConnectivityChange }) # If there are no blocks that have no dependencies, then # there must be a circular dependency somewhere! if len(blocksToRunImmediately) == 0: self.emit( "log-message", level = "error", message = "No blocks can run." 
) dependencyError = True # Check for circular dependencies using a topological sorting algorithm def findCircularDependencies (blocks, graph): circularDeps = [] while len(blocks) > 0: block = blocks.pop() toRemove = [] for item in graph: for decl in block["decls"]: item["deps"].discard(decl) if len(item["deps"]) is 0: toRemove.append(item) for item in toRemove: graph.remove(item) blocks.append(item) # Remove any blocks that just depend on one of the # circularly-dependent blocks toRemove = [] for item in graph: if len(item["decls"]) == 0: toRemove.append(item) for item in toRemove: graph.remove(item) return graph circularDeps = findCircularDependencies( blocks = [{ "block": block.id, "position": block.position, "decls": block.getGlobalDeclarationNames() } for block in blocksToRunImmediately], graph = [{ "block": item["block"].id, "position": item["block"].position, "deps": item["deps"].copy(), "decls": item["block"].getGlobalDeclarationNames() } for item in dependencyGraph] ) if len(circularDeps) > 0: self.emit( "log-message", level = "error", message = "Circular dependencies detected:" ) for item in sorted( circularDeps, key = lambda item: item["position"] ): self.emit( "log-message", level = "error", message = "* {:s} depends on {:s}".format( ', '.join(item["decls"]), ', '.join(item["deps"]) ), block = item["block"] ) dependencyError = True # Do not run if there was an error with the dependencies. if dependencyError: self.state = State.COMPLETE self._complete.errback(Exception("Dependency errors prevented start.")) _removeListeners() # Run blocks with no dependencies in order of their position. # Blocks are sorted first by x then by y. else: for block in sorted( blocksToRunImmediately, key = lambda b: b.position ): _runBlock(block) return self._complete def _reset (self): results = [] for block in self.topBlocks.values(): try: results.append(block.reset()) except AlreadyRunning: pass return defer.DeferredList(results) def _pause (self): results = [] for block in self.topBlocks.values(): try: results.append(block.pause()) except NotRunning: pass self.emit("workspace-paused") return defer.DeferredList(results) def _resume (self): results = [] for block in self.topBlocks.values(): try: block.resume() except NotPaused: pass self.emit("workspace-resumed") return defer.DeferredList(results) def _cancel (self, abort = False): results = [] for block in self.topBlocks.values(): try: block.cancel(abort) except NotRunning: pass return defer.DeferredList(results) # # Serialisation # def toEvents (self): events = [] for block in self.topBlocks.values(): events.extend(block.toEvents()) return events def fromEvents (self, events): for e in events: if "block" in e['data']: e['data']['id'] = e['data']['block'] event = Event.fromPayload(e['type'], e['data']) event.apply(self) class Variables (EventEmitter): def __init__ (self): self._variables = {} self._handlers = {} def add (self, name, variable): if name in self._variables: if self._variables[name] is variable: return self.remove(name) self._variables[name] = variable def _makeHandler (name): def onChange (data): self.emit('variable-changed', name = name, **data) return onChange if isinstance(variable, BaseVariable): onChange = _makeHandler(name) variable.on('change', onChange) self._handlers[name] = onChange self.emit('variable-added', name = name, variable = variable) elif isinstance(variable, Component): handlers = {} for attrname, attr in variable.variables.items(): onChange = _makeHandler(attrname) attr.on('change', onChange) handlers[attrname] = 
onChange self._variables[attrname] = attr self.emit('variable-added', name = attrname, variable = variable) self._handlers[name] = handlers else: self._handlers[name] = None def remove (self, name): try: variable = self._variables[name] except KeyError: return if isinstance(variable, BaseVariable): variable.off( 'change', self._handlers[name] ) self.emit('variable-removed', name = name, variable = variable) elif isinstance(variable, Component): for attrname, attr in variable.variables.items(): attr.off( 'change', self._handlers[name][attrname] ) self.emit('variable-removed', name = attrname, variable = variable) del self._variables[attrname] del self._variables[name] del self._handlers[name] def rename (self, oldName, newName): log.msg("Renaming variable: %s to %s" % (oldName, newName)) if oldName == newName: return try: variable = self._variables[oldName] except KeyError: return if isinstance(variable, Component): oldNames = [name for name, var in variable.variables.items()] else: oldNames = [oldName] variable.alias = newName for name in oldNames: variable = self._variables[name] newName = variable.alias self._variables[newName] = self._variables[name] self._handlers[newName] = self._handlers[name] del self._variables[name] del self._handlers[name] self.emit('variable-renamed', oldName = name, newName = newName, variable = variable ) def get (self, name): try: return self._variables[name] except KeyError: return None __getitem__ = get __setitem__ = add __delitem__ = remove def items (self): return self._variables.items() def values (self): return self._variables.values() def anyOfStackIs (block, states): while block: if block.state in states: return True block = block.nextBlock class Block (BaseStep, EventEmitter): # If this block needs to be stopped by the workspace # (e.g. long-running disconnected controls) # TODO: make this more general - this ought to be True # for any block with an output connection which is started # by eval() rather than run() externalStop = False # If this block returns an output, the output data type # may be specified. Useful if the block does not return a # value immediately. outputType = None @property def state (self): return self._state @state.setter def state (self, value): self._state = value self.workspace.emit("block-state", block = self.id, state = value.name) @property def disabled (self): try: return self._disabled except AttributeError: return False @disabled.setter def disabled (self, disabled): self._disabled = bool(disabled) try: if disabled: self.cancel(propagate = False) else: self.reset(propagate = False) except (NotRunning, AlreadyRunning): pass self.emit("connectivity-changed") def __init__ (self, workspace, id): self.workspace = workspace self.id = id self.type = self.__class__.__name__ self.state = State.READY self.nextBlock = None self.prevBlock = None self.outputBlock = None self.parentInput = None self._complete
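Workspace._run above defers top blocks whose referenced global variables are not yet declared, releases them as declaring blocks complete, and flags circular dependencies when nothing can start. A minimal synchronous sketch of that scheduling policy; schedule and its (name, declares, depends) tuples are illustrative, while the real implementation is event-driven through Twisted deferreds.

def schedule(blocks):
    """blocks: list of (name, declares, depends) tuples.
    Returns a valid run order, or raises on circular/unmet dependencies."""
    declared, order = set(), []
    pending = list(blocks)
    while pending:
        # A block is ready once every name it depends on has been declared.
        ready = [b for b in pending if set(b[2]) <= declared]
        if not ready:
            raise RuntimeError("circular or unmet dependencies: %r"
                               % [b[0] for b in pending])
        for b in ready:
            pending.remove(b)
            order.append(b[0])
            declared.update(b[1])
    return order

order = schedule([
    ("set_temp", {"temperature"}, set()),
    ("log_temp", set(), {"temperature"}),
])
assert order == ["set_temp", "log_temp"]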
= pyvips.Image.new_from_file(TIF_FILE) x = x.copy() x.set("orientation", 2) x.write_to_file(filename) x = pyvips.Image.new_from_file(filename) y = x.get("orientation") assert y == 2 x = x.copy() x.remove("orientation") filename = temp_filename(self.tempdir, '.tif') x.write_to_file(filename) x = pyvips.Image.new_from_file(filename) y = x.get("orientation") assert y == 1 filename = temp_filename(self.tempdir, '.tif') x = pyvips.Image.new_from_file(TIF_FILE) x = x.copy() x.set("orientation", 6) x.write_to_file(filename) x1 = pyvips.Image.new_from_file(filename) x2 = pyvips.Image.new_from_file(filename, autorotate=True) assert x1.width == x2.height assert x1.height == x2.width filename = temp_filename(self.tempdir, '.tif') x = pyvips.Image.new_from_file(TIF_FILE) x = x.copy() x.write_to_file(filename, xres=100, yres=200, resunit="cm") x1 = pyvips.Image.new_from_file(filename) assert x1.get("resolution-unit") == "cm" assert x1.xres == 100 assert x1.yres == 200 filename = temp_filename(self.tempdir, '.tif') x = pyvips.Image.new_from_file(TIF_FILE) x = x.copy() x.write_to_file(filename, xres=100, yres=200, resunit="inch") x1 = pyvips.Image.new_from_file(filename) assert x1.get("resolution-unit") == "in" assert x1.xres == 100 assert x1.yres == 200 # OME support in 8.5 x = pyvips.Image.new_from_file(OME_FILE) assert x.width == 439 assert x.height == 167 page_height = x.height x = pyvips.Image.new_from_file(OME_FILE, n=-1) assert x.width == 439 assert x.height == page_height * 15 x = pyvips.Image.new_from_file(OME_FILE, page=1, n=-1) assert x.width == 439 assert x.height == page_height * 14 x = pyvips.Image.new_from_file(OME_FILE, page=1, n=2) assert x.width == 439 assert x.height == page_height * 2 x = pyvips.Image.new_from_file(OME_FILE, n=-1) assert x(0, 166)[0] == 96 assert x(0, 167)[0] == 0 assert x(0, 168)[0] == 1 filename = temp_filename(self.tempdir, '.tif') x.write_to_file(filename) x = pyvips.Image.new_from_file(filename, n=-1) assert x.width == 439 assert x.height == page_height * 15 assert x(0, 166)[0] == 96 assert x(0, 167)[0] == 0 assert x(0, 168)[0] == 1 # pyr save to buffer added in 8.6 x = pyvips.Image.new_from_file(TIF_FILE) buf = x.tiffsave_buffer(tile=True, pyramid=True) filename = temp_filename(self.tempdir, '.tif') x.tiffsave(filename, tile=True, pyramid=True) with open(filename, 'rb') as f: buf2 = f.read() assert len(buf) == len(buf2) a = pyvips.Image.new_from_buffer(buf, "", page=2) b = pyvips.Image.new_from_buffer(buf2, "", page=2) assert a.width == b.width assert a.height == b.height assert a.avg() == b.avg() x = pyvips.Image.new_from_file(TIF_FILE) buf = x.tiffsave_buffer(tile=True, pyramid=True, region_shrink="mean") buf = x.tiffsave_buffer(tile=True, pyramid=True, region_shrink="mode") buf = x.tiffsave_buffer(tile=True, pyramid=True, region_shrink="median") buf = x.tiffsave_buffer(tile=True, pyramid=True, region_shrink="max") buf = x.tiffsave_buffer(tile=True, pyramid=True, region_shrink="min") buf = x.tiffsave_buffer(tile=True, pyramid=True, region_shrink="nearest") @skip_if_no("magickload") def test_magickload(self): def bmp_valid(im): a = im(100, 100) assert_almost_equal_objects(a, [227, 216, 201]) assert im.width == 1419 assert im.height == 1001 self.file_loader("magickload", BMP_FILE, bmp_valid) self.buffer_loader("magickload_buffer", BMP_FILE, bmp_valid) # we should have rgb or rgba for svg files ... different versions of # IM handle this differently. GM even gives 1 band. 
im = pyvips.Image.magickload(SVG_FILE) assert im.bands == 3 or im.bands == 4 or im.bands == 1 # density should change size of generated svg im = pyvips.Image.magickload(SVG_FILE, density='100') width = im.width height = im.height im = pyvips.Image.magickload(SVG_FILE, density='200') # This seems to fail on travis, no idea why, some problem in their IM # perhaps # assert im.width == width * 2 # assert im.height == height * 2 im = pyvips.Image.magickload(GIF_ANIM_FILE) width = im.width height = im.height im = pyvips.Image.magickload(GIF_ANIM_FILE, n=-1) assert im.width == width assert im.height == height * 5 # page/n let you pick a range of pages # 'n' param added in 8.5 if pyvips.at_least_libvips(8, 5): im = pyvips.Image.magickload(GIF_ANIM_FILE) width = im.width height = im.height im = pyvips.Image.magickload(GIF_ANIM_FILE, page=1, n=2) assert im.width == width assert im.height == height * 2 page_height = im.get("page-height") assert page_height == height # should work for dicom im = pyvips.Image.magickload(DICOM_FILE) assert im.width == 128 assert im.height == 128 # some IMs are 3 bands, some are 1, can't really test # assert im.bands == 1 # libvips has its own sniffer for ICO, test that with open(ICO_FILE, 'rb') as f: buf = f.read() im = pyvips.Image.new_from_buffer(buf, "") assert im.width == 16 assert im.height == 16 # added in 8.7 @skip_if_no("magicksave") def test_magicksave(self): # save to a file and load again ... we can't use save_load_file since # we want to make sure we use magickload/save # don't use BMP - GraphicsMagick always adds an alpha # don't use TIF - IM7 will save as 16-bit filename = temp_filename(self.tempdir, ".jpg") self.colour.magicksave(filename) x = pyvips.Image.magickload(filename) assert self.colour.width == x.width assert self.colour.height == x.height assert self.colour.bands == x.bands max_diff = (self.colour - x).abs().max() assert max_diff < 60 self.save_load_buffer("magicksave_buffer", "magickload_buffer", self.colour, 60, format="JPG") # try an animation if have("gifload"): x1 = pyvips.Image.new_from_file(GIF_ANIM_FILE, n=-1) w1 = x1.magicksave_buffer(format="GIF") x2 = pyvips.Image.new_from_buffer(w1, "", n=-1) assert x1.get("delay") == x2.get("delay") assert x1.get("page-height") == x2.get("page-height") # magicks vary in how they handle this ... just pray we are close assert abs(x1.get("gif-loop") - x2.get("gif-loop")) < 5 @skip_if_no("webpload") def test_webp(self): def webp_valid(im): a = im(10, 10) # different webp versions use different rounding systems leading # to small variations assert_almost_equal_objects(a, [71, 166, 236], threshold=2) assert im.width == 550 assert im.height == 368 assert im.bands == 3 self.file_loader("webpload", WEBP_FILE, webp_valid) self.buffer_loader("webpload_buffer", WEBP_FILE, webp_valid) self.save_load_buffer("webpsave_buffer", "webpload_buffer", self.colour, 60) self.save_load("%s.webp", self.colour) # test lossless mode im = pyvips.Image.new_from_file(WEBP_FILE) buf = im.webpsave_buffer(lossless=True) im2 = pyvips.Image.new_from_buffer(buf, "") assert abs(im.avg() - im2.avg()) < 1 # higher Q should mean a bigger buffer b1 = im.webpsave_buffer(Q=10) b2 = im.webpsave_buffer(Q=90) assert len(b2) > len(b1) # try saving an image with an ICC profile and reading it back ... 
if we # can do it, our webp supports metadata load/save buf = self.colour.webpsave_buffer() im = pyvips.Image.new_from_buffer(buf, "") if im.get_typeof("icc-profile-data") != 0: # verify that the profile comes back unharmed p1 = self.colour.get("icc-profile-data") p2 = im.get("icc-profile-data") assert p1 == p2 # add tests for exif, xmp, ipct # the exif test will need us to be able to walk the header, # we can't just check exif-data # we can test that exif changes change the output of webpsave # first make sure we have exif support z = pyvips.Image.new_from_file(JPEG_FILE) if z.get_typeof("exif-ifd0-Orientation") != 0: x = self.colour.copy() x.set("orientation", 6) buf = x.webpsave_buffer() y = pyvips.Image.new_from_buffer(buf, "") assert y.get("orientation") == 6 # try converting an animated gif to webp ... can't do back to gif # again without IM support if have("gifload"): x1 = pyvips.Image.new_from_file(GIF_ANIM_FILE, n=-1) w1 = x1.webpsave_buffer(Q=10) x2 = pyvips.Image.new_from_buffer(w1, "", n=-1) assert x1.width == x2.width assert x1.height == x2.height assert x1.get("delay") == x2.get("delay") assert x1.get("page-height") == x2.get("page-height") assert x1.get("gif-loop") == x2.get("gif-loop") @skip_if_no("analyzeload") def test_analyzeload(self): def analyze_valid(im): a = im(10, 10) assert pytest.approx(a[0]) == 3335 assert im.width == 128 assert im.height == 8064 assert im.bands == 1 self.file_loader("analyzeload", ANALYZE_FILE, analyze_valid) @skip_if_no("matload") def test_matload(self): def matlab_valid(im): a = im(10, 10) assert_almost_equal_objects(a, [38671.0, 33914.0, 26762.0]) assert im.width == 290 assert im.height == 442 assert im.bands == 3 self.file_loader("matload", MATLAB_FILE, matlab_valid) @skip_if_no("openexrload") def test_openexrload(self): def exr_valid(im): a = im(10, 10) assert_almost_equal_objects(a, [0.124512, 0.159668, 0.040375, 255.0], threshold=0.00001) assert im.width == 610 assert im.height == 406 assert im.bands == 4 self.file_loader("openexrload", EXR_FILE, exr_valid) @skip_if_no("fitsload") def test_fitsload(self): def fits_valid(im): a = im(10, 10) assert_almost_equal_objects(a, [-0.165013, -0.148553, 1.09122, -0.942242], threshold=0.00001) assert im.width == 200 assert im.height == 200 assert im.bands == 4 self.file_loader("fitsload", FITS_FILE, fits_valid) self.save_load("%s.fits", self.mono) @skip_if_no("niftiload") def test_niftiload(self): def nifti_valid(im): a = im(30, 26) assert_almost_equal_objects(a, [131]) assert im.width == 91 assert im.height == 9919 assert im.bands == 1 self.file_loader("niftiload", NIFTI_FILE, nifti_valid) self.save_load("%s.nii.gz", self.mono) @skip_if_no("openslideload") def test_openslideload(self): def openslide_valid(im): a = im(10, 10) assert_almost_equal_objects(a, [244, 250, 243, 255]) assert im.width == 2220 assert im.height == 2967 assert im.bands == 4 self.file_loader("openslideload", OPENSLIDE_FILE, openslide_valid) @skip_if_no("pdfload") def test_pdfload(self): def pdf_valid(im): a = im(10, 10) assert_almost_equal_objects(a, [35, 31, 32, 255]) assert im.width == 1134 assert im.height == 680 assert im.bands == 4 self.file_loader("pdfload", PDF_FILE, pdf_valid) self.buffer_loader("pdfload_buffer", PDF_FILE, pdf_valid) im = pyvips.Image.new_from_file(PDF_FILE) x = pyvips.Image.new_from_file(PDF_FILE, scale=2) assert abs(im.width * 2 - x.width) < 2 assert abs(im.height * 2 - x.height) < 2 im = pyvips.Image.new_from_file(PDF_FILE) x = pyvips.Image.new_from_file(PDF_FILE, dpi=144) assert abs(im.width * 2 - 
x.width) < 2 assert abs(im.height * 2 - x.height) < 2 @skip_if_no("gifload") def test_gifload(self): def gif_valid(im): a = im(10, 10) assert_almost_equal_objects(a, [33]) assert im.width == 159 assert im.height == 203 assert im.bands == 1 self.file_loader("gifload", GIF_FILE, gif_valid) self.buffer_loader("gifload_buffer", GIF_FILE, gif_valid) # 'n' param added in 8.5 if pyvips.at_least_libvips(8, 5): x1 = pyvips.Image.new_from_file(GIF_ANIM_FILE) x2 = pyvips.Image.new_from_file(GIF_ANIM_FILE, n=2) assert x2.height
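The n and page-height conventions tested above are general to libvips' multi-page loaders: n=-1 loads every page stacked vertically into one tall image, and the page-height metadata item records the height of a single page. A short sketch of slicing one page back out; the input path is a placeholder.

import pyvips

anim = pyvips.Image.new_from_file("animation.gif", n=-1)  # all pages, stacked
page_height = anim.get("page-height")
n_pages = anim.height // page_height

# Crop out page 2 (0-based) from the stacked image.
page2 = anim.crop(0, 2 * page_height, anim.width, page_height)
print(n_pages, page2.width, page2.height)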
# -*- coding: utf-8 -*- from __future__ import print_function, division import re from PyAstronomy.pyaC import pyaErrors as PE import pickle import os import uuid import six import six.moves as smo def equal(dependsOn): return dependsOn class Params: """ Manage a set of parameters. This class provides a framework for managing parameter values and several associated aspects such as restrictions and relations. Parameter management is implemented in view of later use by a fit routine. Parameters ---------- paramNames : list of strings Specifies the names of the parameters to be managed. Attributes ---------- __params : dict A dictionary containing entries of the kind [variable-name:value]. The `__params` dictionary may only be accessed by the assignValue function to prevent inconsistencies, which may occur especially if relations exist. paramNum : dict A dictionary mapping a running index to each parameter name (e.g., paramNum[2] = "XYZ"). Such a numbering is important to guarantee the correct order of variables in the fitting process. The ordering is the same as the order in which the constructor obtains the parameter names. isFree : dict A dictionary associating each parameter with a boolean saying whether it shall be considered a free parameter during optimization. isRestricted : dict A dictionary associating each parameter with a boolean saying whether there is a restriction to the allowed range (see `restrictions`). restrictions : dict A dictionary containing entries of the form [parName: [lower, upper]]. Here 'lower' and 'upper' are floats defining lower and upper limits for the variable's value. relations : dict Parameters may be interrelated, e.g., A = B + C. This dictionary holds the definition of such relations. In particular, the entries have the form {"b":[ ["a", func, ["b", "c"]], [...] ]...}. This means that a = func(b, c). func is a function pointer. Whenever b is assigned a new value, 'a' has to be updated as well. Note that it is important that the independent variables "know" about the relation, because if their value changes, the value of the dependent variable (in this case `a`) has to be updated. conditionalRestrictions : dict A dictionary holding the 'conditional restrictions', i.e., complex restrictions, which may, e.g., depend on the values of other parameters. The dictionary key is a unique ID generated when a conditional restriction is added. For each key, the dictionary holds a tuple of which the first entry is a list holding the names of the parameters on which the conditional restriction depends and the second is a callable, which is called with the values of those parameters specified in the first entry. The callable must return a float that specifies the penalty (or reward) depending on the given parameter values. Because conditional restrictions are referred to using a unique ID, their name (i.e., ID) does not change if models are combined. Notes ----- Different models make different demands on the variables. For example, only certain ranges may be valid, some are constant and others not, or there may be a functional dependence between different variables. This class provides a framework to manage a parameter set appropriately. Depending on what kind of model is currently adapted, the number, names, allowed ranges, and interdependencies of variables can differ. On creation, this class is given a list with parameter names to manage. Those can then be assigned values. Parameters can be "thawed", i.e., regarded free during the fit process, or frozen.
The allowed ranges can be restricted either on one or both sides, and interdependencies can be defined via the `relate` method. """ def __add__(self, right): """ This member allows to combine the properties of two Param class \ instances. All parameter properties will be copied into the new \ Parameter class instance, which is returned by this function. .. caution:: All parameter names must be unique! """ paLeft = self.parameters() paRight = right.parameters() # Check whether parameters have unique names for p in paLeft: if p in paRight: raise(PE.PyANameClash( "At least two parameters share the same name.", where="Params::__add__")) # List of new names nlist = list(paLeft.keys()) nlist.extend(paRight.keys()) # Create new parameter objects result = Params(nlist) # Combine properties result.paramNum = self.paramNum.copy() pn = len(result.paramNum) for i in smo.range(len(right.paramNum)): result.paramNum[pn + i] = right.paramNum[i] result.isFree = self.isFree.copy() result.isFree.update(right.isFree) result.isRestricted = self.isRestricted.copy() result.isRestricted.update(right.isRestricted) result.restrictions = self.restrictions.copy() result.restrictions.update(right.restrictions) result.relations = self.relations.copy() result.relations.update(right.relations) result.conditionalRestrictions = self.conditionalRestrictions.copy() result.conditionalRestrictions.update(right.conditionalRestrictions) result.assignValue(self.parameters()) result.assignValue(right.parameters()) return result def renameParameter(self, old, new): """ Rename an existing parameter. Parameters ---------- old : string The existing (old) name. new : string The new name. """ self.__checkForParam(old) if new in self.__params: raise(PE.PyAValError("Parameter already exists: " + new, where="Params::renameParameter")) self.__params[new] = self.__params[old] del self.__params[old] for i in smo.range(len(self.paramNum)): if self.paramNum[i] == old: self.paramNum[i] = new break self.isFree[new] = self.isFree[old] del self.isFree[old] self.isRestricted[new] = self.isRestricted[old] del self.isRestricted[old] self.restrictions[new] = self.restrictions[old] del self.restrictions[old] self.relations[new] = self.relations[old] del self.relations[old] # Loop through relations, search, and replace occurrences of the old name. for p in six.iterkeys(self.__params): relations = self.relations[p] if relations == []: continue for k in smo.range(len(relations)): relat = relations[k] if relat[0] == old: relat[0] = new for i in smo.range(len(relat[2])): if relat[2][i] == old: relat[2][i] = new self.relations[p][k] = relat # Loop over conditional restrictions and replace occurrences of the old names for name, v in six.iteritems(self.conditionalRestrictions): for i, p in enumerate(v[0]): # Loop over input-parameter names and replace if necessary if p == old: # Needs to be replaced self.conditionalRestrictions[name][0][i] = new def __init__(self, paramNames): i = 0 # Define instance properties self.__params = {} self.paramNum = {} self.isFree = {} self.isRestricted = {} self.restrictions = {} self.relations = {} self.conditionalRestrictions = {} for n in paramNames: self.__params[n] = 0.0 self.isFree[n] = False self.paramNum[i] = n i += 1 # Firstly, the lower bound, secondly, the upper bound self.isRestricted[n] = [False, False] self.restrictions[n] = [None, None] self.relations[n] = [] def addConditionalRestriction(self, pars, func): """ Define a conditional restriction. 
Conditional restrictions can be used to modify the behavior in a more complex manner. For instance, penalties can be added to the objective function depending on the relation of one or more parameters. The given function is evaluated in each iteration and its return value (a float) is added to the objective function (e.g., chi square). Parameters ---------- pars : list of strings The names of the parameters the given function needs as input. func : callable object This callable object must take the specified parameters (in that exact order) as input. It must return a float, which is added to the value of the objective function. Returns ------- identifier : string A unique ID used to refer to the conditional restriction. """ for p in pars: self.__checkForParam(p) name = uuid.uuid4() self.conditionalRestrictions[name] = (pars[:], func) return name def removeConditionalRestriction(self, id): """ Remove an existing conditional restriction. Parameters ---------- id : string The identifier used to refer to the conditional restriction (returned by `addConditionalRestriction`). """ if id not in self.conditionalRestrictions: raise(PE.PyAValError("No conditional restriction with ID '" + str(id) + "'", where="Params::removeConditionalRestriction")) del self.conditionalRestrictions[id] def showConditionalRestrictions(self, toScreen=True): """ Show conditional restrictions. Parameters ---------- toScreen : boolean, optional If True (default), the output is written to stdout. Returns ------- Output : list of strings The output as a list of strings. """ ll = [] nc = 80 ll.append("-" * nc) ll.append(" Conditional restrictions") ll.append("-" * nc) for name, v in six.iteritems(self.conditionalRestrictions): s = "ID: " + str(name) + ", parameters: " s += ', '.join(v[0]) ll.append(s) ll.append("-" * nc) if toScreen: for l in ll: print(l) return ll def applyConditionalRestrictions(self, fullout=False): """ Apply all conditional restrictions. Parameters ---------- fullout : boolean, optional If True, a dictionary holding the values of the individual restrictions is returned. The IDs are used as dictionary keys. The default is False. Returns ------- Modification : float The summed value of the existing conditional restrictions. Individual values : dictionary, optional The contributions of the individual conditional restrictions. """ result = 0.0 individual = {} for name, v in six.iteritems(self.conditionalRestrictions): val = v[1](*[self.__params[p] for p in v[0]]) result += val individual[name] = val if fullout: return result, individual return result
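# --- Usage sketch (added for illustration; not part of the original module) ---
# The parameter names "a" and "b" are hypothetical. The snippet penalizes any
# solution in which a exceeds b, which a fit routine would then avoid:
#
#     ps = Params(["a", "b"])
#     ps.assignValue({"a": 1.0, "b": 2.0})
#     rid = ps.addConditionalRestriction(
#         ["a", "b"], lambda a, b: 1e6 if a > b else 0.0)
#     penalty = ps.applyConditionalRestrictions()   # 0.0 for these values
#     ps.removeConditionalRestriction(rid)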
'Wound drainage'), ('WNDE', 'Wound exudate'), ('XXX', 'To be specified in another part of the message'))), 'HL70074': ('Diagnostic service section ID', (('AU', 'Audiology'), ('BG', 'Blood Gases'), ('BLB', 'Blood Bank'), ('CH', 'Chemistry'), ('CP', 'Cytopathology'), ('CT', 'CAT Scan'), ('CTH', 'Cardiac Catheterization'), ('CUS', 'Cardiac Ultrasound'), ('EC', 'Electrocardiac (e.g., EKG, EEC, Holter)'), ('EN', 'Electroneuro (EEG, EMG,EP,PSG)'), ('HM', 'Hematology'), ('ICU', 'Bedside ICU Monitoring'), ('IMG', 'Diagnostic Imaging'), ('IMM', 'Immunology'), ('LAB', 'Laboratory'), ('MB', 'Microbiology'), ('MCB', 'Mycobacteriology'), ('MYC', 'Mycology'), ('NMR', 'Nuclear Magnetic Resonance'), ('NMS', 'Nuclear Medicine Scan'), ('NRS', 'Nursing Service Measures'), ('OSL', 'Outside Lab'), ('OT', 'Occupational Therapy'), ('OTH', 'Other'), ('OUS', 'OB Ultrasound'), ('PAR', 'Parasitology'), ('PAT', 'Pathology (gross & histopath, not surgical)'), ('PF', 'Pulmonary function'), ('PHR', 'Pharmacy'), ('PHY', 'Physician (Hx. Dx, admission note, etc.)'), ('PT', 'Physical Therapy'), ('RAD', 'Radiology'), ('RC', 'Respiratory Care (therapy)'), ('RT', 'Radiation therapy'), ('RUS', 'Radiology ultrasound'), ('RX', 'Radiograph'), ('SP', 'Surgical Pathology'), ('SR', 'Serology'), ('TX', 'Toxicology'), ('URN', 'Urinalysis'), ('VR', 'Virology'), ('VUS', 'Vascular Ultrasound'), ('XRC', 'Cineradiograph'))), 'HL70076': ('Message type', (('ACK', 'General acknowledgment message'), ('ADR', 'ADT response'), ('ADT', 'ADT message'), ('BAR', 'Add/change billing account'), ('CRM', 'Clinical study registration message'), ('CSU', 'Unsolicited study data message'), ('DFT', 'Detail financial transactions'), ('DOC', 'Document response'), ('DSR', 'Display response'), ('EAC', 'Automated equipment command message'), ('EAN', 'Automated equipment notification message'), ('EAR', 'Automated equipment response message'), ('EDR', 'Enhanced display response'), ('EQQ', 'Embedded query language query'), ('ERP', 'Event replay response'), ('ESR', 'Automated equipment status update acknowledgement message'), ('ESU', 'Automated equipment status update message'), ('INR', 'Automated equipment inventory request message'), ('INU', 'Automated equipment inventory update message'), ('LSR', 'Automated equipment log/service request message'), ('LSU', 'Automated equipment log/service update message'), ('MCF', 'Delayed Acknowledgement (Retained for backward compatibility only)'), ('MDM', 'Medical document management'), ('MFD', 'Master files delayed application acknowledgment'), ('MFK', 'Master files application acknowledgment'), ('MFN', 'Master files notification'), ('MFQ', 'Master files query'), ('MFR', 'Master files response'), ('NMD', 'Application management data message'), ('NMQ', 'Application management query message'), ('NMR', 'Application management response message'), ('OMD', 'Dietary order'), ('OMG', 'General clinical order message'), ('OML', 'Laboratory order message'), ('OMN', 'Non-stock requisition order message'), ('OMP', 'Pharmacy/treatment order message'), ('OMS', 'Stock requisition order message'), ('ORD', 'Dietary order - General order acknowledgment message'), ('ORF', 'Query for results of observation'), ('ORG', 'General clinical order acknowledgement message'), ('ORL', 'Laboratory acknowledgement message (unsolicited)'), ('ORM', 'Pharmacy/treatment order message'), ('ORN', 'Non-stock requisition - General order acknowledgment message'), ('ORP', 'Pharmacy/treatment order acknowledgement message'), ('ORR', 'General order response message response 
to any ORM'), ('ORS', 'Stock requisition - General order acknowledgment message'), ('ORU', 'Unsolicited transmission of an observation message'), ('OSQ', 'Query response for order status'), ('OSR', 'Query response for order status'), ('OUL', 'Unsolicited laboratory observation message'), ('PEX', 'Product experience message'), ('PGL', 'Patient goal message'), ('PIN', 'Patient insurance information'), ('PMU', 'Add personnel record'), ('PPG', 'Patient pathway message (goal-oriented)'), ('PPP', 'Patient pathway message (problem-oriented)'), ('PPR', 'Patient problem message'), ('PPT', 'Patient pathway goal-oriented response'), ('PPV', 'Patient goal response'), ('PRR', 'Patient problem response'), ('PTR', 'Patient pathway problem-oriented response'), ('QBP', 'Query by parameter'), ('QCK', 'Deferred query'), ('QCN', 'Cancel query'), ('QRY', 'Query, original mode'), ('QSB', 'Create subscription'), ('QSX', 'Cancel subscription/acknowledge message'), ('QVR', 'Query for previous events'), ('RAR', 'Pharmacy/treatment administration information'), ('RAS', 'Pharmacy/treatment administration message'), ('RCI', 'Return clinical information'), ('RCL', 'Return clinical list'), ('RDE', 'Pharmacy/treatment encoded order message'), ('RDR', 'Pharmacy/treatment dispense information'), ('RDS', 'Pharmacy/treatment dispense message'), ('RDY', 'Display based response'), ('REF', 'Patient referral'), ('RER', 'Pharmacy/treatment encoded order information'), ('RGR', 'Pharmacy/treatment dose information'), ('RGV', 'Pharmacy/treatment give message'), ('ROR', 'Pharmacy/treatment order response'), ('RPA', 'Return patient authorization'), ('RPI', 'Return patient information'), ('RPL', 'Return patient display list'), ('RPR', 'Return patient list'), ('RQA', 'Request patient authorization'), ('RQC', 'Request clinical information'), ('RQI', 'Request patient information'), ('RQP', 'Request patient demographics'), ('RQQ', 'Event replay query'), ('RRA', 'Pharmacy/treatment administration acknowledgement message'), ('RRD', 'Pharmacy/treatment dispense acknowledgment message'), ('RRE', 'Pharmacy/treatment encoded order acknowledgment message'), ('RRG', 'Pharmacy/treatment give acknowledgment message'), ('RRI', 'Return referral information'), ('RSP', 'Segment pattern response'), ('RTB', 'Tabular response'), ('SIU', 'Schedule information unsolicited'), ('SPQ', 'Stored procedure request'), ('SQM', 'Schedule query message'), ('SQR', 'Schedule query response'), ('SRM', 'Schedule request message'), ('SRR', 'Scheduled request response'), ('SSR', 'Specimen status request message'), ('SSU', 'Specimen status update message'), ('SUR', 'Summary product experience report'), ('TBR', 'Tabular data response'), ('TCR', 'Automated equipment test code settings request message'), ('TCU', 'Automated equipment test code settings update message'), ('UDM', 'Unsolicited display update message'), ('VQQ', 'Virtual table query'), ('VXQ', 'Query for vaccination record'), ('VXR', 'Vaccination record response'), ('VXU', 'Unsolicited vaccination record update'), ('VXX', 'Response for vaccination query with multiple PID matches'))), 'HL70078': ('Abnormal flags', (('<', 'Below absolute low-off instrument scale'), ('>', 'Above absolute high-off instrument scale'), ('A', 'Abnormal (applies to non-numeric results)'), ('AA', 'Very abnormal (applies to non-numeric units, analogous to panic limits for numeric units)'), ('B', 'Better--use when direction not relevant'), ('D', 'Significant change down'), ('H', 'Above high normal'), ('HH', 'Above upper panic limits'), ('I', 
'Intermediate*'), ('L', 'Below low normal'), ('LL', 'Below lower panic limits'), ('MS', 'Moderately susceptible*'), ('N', 'Normal (applies to non-numeric results)'), ('null', "No range defined, or normal ranges don't apply"), ('R', 'Resistant*'), ('S', 'Susceptible*'), ('U', 'Significant change up'), ('VS', 'Very susceptible*'), ('W', 'Worse--use when direction not relevant'))), 'HL70080': ('Nature of abnormal testing', (('A', 'An age-based population'), ('N', 'None - generic normal range'), ('R', 'A race-based population'), ('S', 'A sex-based population'))), 'HL70083': ('Outlier type', (('C', 'Outlier cost'), ('D', 'Outlier days'))), 'HL70085': ('Observation result status codes interpretation', (('C', 'Record coming over is a correction and thus replaces a final result'), ('D', 'Deletes the OBX record'), ('F', 'Final results; Can only be changed with a corrected result.'), ('I', 'Specimen in lab; results pending'), ('N', 'Not asked; used to affirmatively document that the observation identified in the OBX was ' 'not sought when the universal service ID in OBR-4 implies that it would be sought.'), ('O', 'Order detail description only (no result)'), ('P', 'Preliminary results'), ('R', 'Results entered -- not verified'), ('S', 'Partial results'), ('U', "Results status change to final without retransmitting results already sent as 'preliminary.' " "E.g., radiology changes status from preliminary to final"), ('W', 'Post original as wrong, e.g., transmitted for wrong patient'), ('X', 'Results cannot be obtained for this observation'))), 'HL70091': ('Query priority', (('D', 'Deferred'), ('I', 'Immediate'))), 'HL70092': ('Re-admission indicator', (('R', 'Re-admission'),)), 'HL70093': ('Release information', (('...', 'user-defined codes'), ('N', 'No'), ('Y', 'Yes'))), 'HL70098': ('Type of agreement', (('M', 'Maternity'), ('S', 'Standard'), ('U', 'Unified'))), 'HL70100': ('When to charge', (('D', 'On discharge'), ('O', 'On receipt of order'), ('R', 'At time service is completed'), ('S', 'At time service is started'), ('T', 'At a designated date/time'))), 'HL70102': ('Delayed acknowledgment type', (('D', 'Message received, stored for later processing'), ('F', 'acknowledgment after processing'))), 'HL70103': ('Processing ID', (('D', 'Debugging'), ('P', 'Production'), ('T', 'Training'))), 'HL70104': ('Version ID', (('2.0', 'Release 2.0'), ('2.0D', 'Demo 2.0'), ('2.1', 'Release 2. 
1'), ('2.2', 'Release 2.2'), ('2.3', 'Release 2.3'), ('2.3.1', 'Release 2.3.1'), ('2.4', 'Release 2.4'))), 'HL70105': ('Source of comment', (('L', 'Ancillary (filler) department is source of comment'), ('O', 'Other system is source of comment'), ('P', 'Orderer (placer) is source of comment'))), 'HL70106': ('Query/response format code', (('D', 'Response is in display format'), ('R', 'Response is in record-oriented format'), ('T', 'Response is in tabular format'))), 'HL70107': ('Deferred response type', (('B', 'Before the Date/Time specified'), ('L', 'Later than the Date/Time specified'))), 'HL70108': ('Query results level', (('O', 'Order plus order status'), ('R', 'Results without bulk text'), ('S', 'Status only'), ('T', 'Full results'))), 'HL70109': ('Report priority', (('R', 'Routine'), ('S', 'Stat'))), 'HL70112': ('Discharge disposition', (('01', 'Discharged to home or self care (routine discharge)'), ('02', 'Discharged/transferred to another short term general hospital for inpatient care'), ('03', 'Discharged/transferred to skilled nursing facility (SNF)'), ('04', 'Discharged/transferred to an intermediate care facility (ICF)'), ('05', 'Discharged/transferred to another type of institution for inpatient care or referred for ' 'outpatient services to another institution'), ('06', 'Discharged/transferred to home under care of organized home health service organization'), ('07', 'Left against medical advice or discontinued care'), ('08', 'Discharged/transferred to home under care of Home IV provider'), ('09', 'Admitted as an inpatient to this hospital'), ('10 ...19', 'Discharge to be defined at state level, if necessary'), ('20', 'Expired (i.e. dead)'), ('21 ... 29', 'Expired to be defined at state level, if necessary'),
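# The HL7 mapping above pairs each table ID with a (table name, ((code,
# label), ...)) tuple. A small lookup helper is sketched below for
# illustration; the dictionary's real variable name is not visible in this
# excerpt, so `hl7_tables` is a hypothetical stand-in.
def describe_hl7_code(hl7_tables, table_id, code):
    """Return 'Table name: label' for a code from one of the HL7 tables."""
    table_name, entries = hl7_tables[table_id]
    for value, label in entries:
        if value == code:
            return "%s: %s" % (table_name, label)
    raise KeyError("code %r not found in table %s" % (code, table_id))

# e.g. describe_hl7_code(hl7_tables, 'HL70103', 'P') -> 'Processing ID: Production'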
= '\n' else: eol_ = '' if self.Malware_Subject_Node_A is not None: self.Malware_Subject_Node_A.export(write, level, 'maecPackage:', name_='Malware_Subject_Node_A', pretty_print=pretty_print) if self.Malware_Subject_Node_B is not None: self.Malware_Subject_Node_B.export(write, level, 'maecPackage:', name_='Malware_Subject_Node_B', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('similarity_distance', node) if value is not None and 'similarity_distance' not in already_processed: already_processed.add('similarity_distance') try: self.similarity_distance = float(value) except ValueError as exp: raise ValueError('Bad float/double attribute (similarity_distance): %s' % exp) value = find_attr_value_('similarity_index', node) if value is not None and 'similarity_index' not in already_processed: already_processed.add('similarity_index') try: self.similarity_index = float(value) except ValueError as exp: raise ValueError('Bad float/double attribute (similarity_index): %s' % exp) def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Malware_Subject_Node_A': obj_ = MalwareSubjectReferenceType.factory() obj_.build(child_) self.set_Malware_Subject_Node_A(obj_) elif nodeName_ == 'Malware_Subject_Node_B': obj_ = MalwareSubjectReferenceType.factory() obj_.build(child_) self.set_Malware_Subject_Node_B(obj_) # end class ClusterEdgeNodePairType class ClusterCompositionType(GeneratedsSuper): """The ClusterCompositionType captures the composition of a malware cluster via its edges and their respective connected nodes, as in an undirected graph.For clustering algorithms that may capture different types of scores, the score_type attribute specifies the type of score used to define the composition of this malware cluster.""" subclass = None superclass = None def __init__(self, score_type=None, Edge_Node_Pair=None): self.score_type = _cast(None, score_type) if Edge_Node_Pair is None: self.Edge_Node_Pair = [] else: self.Edge_Node_Pair = Edge_Node_Pair def factory(*args_, **kwargs_): if ClusterCompositionType.subclass: return ClusterCompositionType.subclass(*args_, **kwargs_) else: return ClusterCompositionType(*args_, **kwargs_) factory = staticmethod(factory) def get_Edge_Node_Pair(self): return self.Edge_Node_Pair def set_Edge_Node_Pair(self, Edge_Node_Pair): self.Edge_Node_Pair = Edge_Node_Pair def add_Edge_Node_Pair(self, value): self.Edge_Node_Pair.append(value) def insert_Edge_Node_Pair(self, index, value): self.Edge_Node_Pair[index] = value def get_score_type(self): return self.score_type def set_score_type(self, score_type): self.score_type = score_type def hasContent_(self): if ( self.Edge_Node_Pair ): return True else: return False def export(self, write, level, namespace_='maecPackage:', name_='ClusterCompositionType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(write, level, pretty_print) write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(write, level, already_processed, namespace_, name_='ClusterCompositionType') if self.hasContent_(): write('>%s' % (eol_, )) self.exportChildren(write, level + 1, namespace_, name_, 
pretty_print=pretty_print) showIndent(write, level, pretty_print) write('</%s%s>%s' % (namespace_, name_, eol_)) else: write('/>%s' % (eol_, )) def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='ClusterCompositionType'): if self.score_type is not None and 'score_type' not in already_processed: already_processed.add('score_type') write(' score_type=%s' % (quote_attrib(self.score_type))) def exportChildren(self, write, level, namespace_='maecPackage:', name_='ClusterCompositionType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' for Edge_Node_Pair_ in self.Edge_Node_Pair: Edge_Node_Pair_.export(write, level, 'maecPackage:', name_='Edge_Node_Pair', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('score_type', node) if value is not None and 'score_type' not in already_processed: already_processed.add('score_type') self.score_type = value def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Edge_Node_Pair': obj_ = ClusterEdgeNodePairType.factory() obj_.build(child_) self.Edge_Node_Pair.append(obj_) # end class ClusterCompositionType class ClusteringAlgorithmParametersType(GeneratedsSuper): """The ClusteringAlgorithmParametersType captures any parameters that may have been used in a malware clustering algorithm.""" subclass = None superclass = None def __init__(self, Distance_Threshold=None, Number_of_Iterations=None): self.Distance_Threshold = Distance_Threshold self.Number_of_Iterations = Number_of_Iterations def factory(*args_, **kwargs_): if ClusteringAlgorithmParametersType.subclass: return ClusteringAlgorithmParametersType.subclass(*args_, **kwargs_) else: return ClusteringAlgorithmParametersType(*args_, **kwargs_) factory = staticmethod(factory) def get_Distance_Threshold(self): return self.Distance_Threshold def set_Distance_Threshold(self, Distance_Threshold): self.Distance_Threshold = Distance_Threshold def get_Number_of_Iterations(self): return self.Number_of_Iterations def set_Number_of_Iterations(self, Number_of_Iterations): self.Number_of_Iterations = Number_of_Iterations def hasContent_(self): if ( self.Distance_Threshold is not None or self.Number_of_Iterations is not None ): return True else: return False def export(self, write, level, namespace_='maecPackage:', name_='ClusteringAlgorithmParametersType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(write, level, pretty_print) write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(write, level, already_processed, namespace_, name_='ClusteringAlgorithmParametersType') if self.hasContent_(): write('>%s' % (eol_, )) self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(write, level, pretty_print) write('</%s%s>%s' % (namespace_, name_, eol_)) else: write('/>%s' % (eol_, )) def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='ClusteringAlgorithmParametersType'): pass def exportChildren(self, write, level, namespace_='maecPackage:', name_='ClusteringAlgorithmParametersType', fromsubclass_=False, 
pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Distance_Threshold is not None: showIndent(write, level, pretty_print) write('<%sDistance_Threshold>%s</%sDistance_Threshold>%s' % ('maecPackage:', self.gds_format_float(self.Distance_Threshold, input_name='Distance_Threshold'), 'maecPackage:', eol_)) if self.Number_of_Iterations is not None: showIndent(write, level, pretty_print) write('<%sNumber_of_Iterations>%s</%sNumber_of_Iterations>%s' % ('maecPackage:', self.gds_format_integer(self.Number_of_Iterations, input_name='Number_of_Iterations'), 'maecPackage:', eol_)) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Distance_Threshold': sval_ = child_.text try: fval_ = float(sval_) except (TypeError, ValueError) as exp: raise_parse_error(child_, 'requires float or double: %s' % exp) fval_ = self.gds_validate_float(fval_, node, 'Distance_Threshold') self.Distance_Threshold = fval_ elif nodeName_ == 'Number_of_Iterations': sval_ = child_.text try: ival_ = int(sval_) except (TypeError, ValueError) as exp: raise_parse_error(child_, 'requires integer: %s' % exp) if ival_ <= 0: raise_parse_error(child_, 'requires positiveInteger') ival_ = self.gds_validate_integer(ival_, node, 'Number_of_Iterations') self.Number_of_Iterations = ival_ # end class ClusteringAlgorithmParametersType class NetworkInfrastructureType(GeneratedsSuper): """The NetworkInfrastructureType captures specific details about the network infrastructure used in the malware analysis environment.""" subclass = None superclass = None def __init__(self, Captured_Protocols=None): self.Captured_Protocols = Captured_Protocols def factory(*args_, **kwargs_): if NetworkInfrastructureType.subclass: return NetworkInfrastructureType.subclass(*args_, **kwargs_) else: return NetworkInfrastructureType(*args_, **kwargs_) factory = staticmethod(factory) def get_Captured_Protocols(self): return self.Captured_Protocols def set_Captured_Protocols(self, Captured_Protocols): self.Captured_Protocols = Captured_Protocols def hasContent_(self): if ( self.Captured_Protocols is not None ): return True else: return False def export(self, write, level, namespace_='maecPackage:', name_='NetworkInfrastructureType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(write, level, pretty_print) write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(write, level, already_processed, namespace_, name_='NetworkInfrastructureType') if self.hasContent_(): write('>%s' % (eol_, )) self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(write, level, pretty_print) write('</%s%s>%s' % (namespace_, name_, eol_)) else: write('/>%s' % (eol_, )) def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='NetworkInfrastructureType'): pass def exportChildren(self, write, level, namespace_='maecPackage:', name_='NetworkInfrastructureType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Captured_Protocols is not None: self.Captured_Protocols.export(write, level, 
'maecPackage:', name_='Captured_Protocols', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False): if nodeName_ == 'Captured_Protocols': obj_ = CapturedProtocolListType.factory() obj_.build(child_) self.set_Captured_Protocols(obj_) # end class NetworkInfrastructureType class ActionEquivalenceType(GeneratedsSuper): """The ActionEquivalenceType relates any Actions that are equivalent to each other, e.g., those that were found for the same Malware Subject when using different analysis tools. It can be used as a way of referencing equivalent actions as a single unit, such as for specifying the Action composition of a Behavior.The required id field specifies the ID for the Action Equivalence, and must be of the format specified by the ActionEquivalenceIDPattern type.""" subclass = None superclass = None def __init__(self, id=None, Action_Reference=None): self.id = _cast(None, id) if Action_Reference is None: self.Action_Reference = [] else: self.Action_Reference = Action_Reference def factory(*args_, **kwargs_): if ActionEquivalenceType.subclass: return ActionEquivalenceType.subclass(*args_, **kwargs_) else: return ActionEquivalenceType(*args_, **kwargs_) factory = staticmethod(factory) def get_Action_Reference(self): return self.Action_Reference def set_Action_Reference(self, Action_Reference): self.Action_Reference = Action_Reference def add_Action_Reference(self, value): self.Action_Reference.append(value) def insert_Action_Reference(self, index, value): self.Action_Reference[index] = value def get_id(self): return self.id def set_id(self, id): self.id = id def hasContent_(self): if ( self.Action_Reference ): return True else: return False def export(self, write, level, namespace_='maecPackage:', name_='ActionEquivalenceType', namespacedef_='', pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' showIndent(write, level, pretty_print) write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(write, level, already_processed, namespace_, name_='ActionEquivalenceType') if self.hasContent_(): write('>%s' % (eol_, )) self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print) showIndent(write, level, pretty_print) write('</%s%s>%s' % (namespace_, name_, eol_)) else: write('/>%s' % (eol_, )) def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='ActionEquivalenceType'): if self.id is not None and 'id' not in already_processed: already_processed.add('id') write(' id=%s' % (quote_attrib(self.id), )) def exportChildren(self, write, level, namespace_='maecPackage:', name_='ActionEquivalenceType', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' for Action_Reference_ in self.Action_Reference: Action_Reference_.export(write, level, 'maecPackage:', name_='Action_Reference', pretty_print=pretty_print) def build(self, node): self.__sourcenode__ = node already_processed = set() self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_) def 
buildAttributes(self, node, attrs, already_processed): value = find_attr_value_('id', node) if value is not None and 'id' not in already_processed: already_processed.add('id') self.id = value
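# --- Usage sketch (illustrative, not part of the generated module) ---
# Assumes the usual generateDS helpers referenced above (Tag_pattern_,
# find_attr_value_) are defined earlier in this module; lxml is used only
# to build an element tree for build(), and the XML snippet is made up.
#
#     from lxml import etree
#
#     xml = (b'<ClusterComposition score_type="distance">'
#            b'<Edge_Node_Pair similarity_distance="0.25" similarity_index="0.9"/>'
#            b'</ClusterComposition>')
#     cluster = ClusterCompositionType.factory()
#     cluster.build(etree.fromstring(xml))
#     assert cluster.get_score_type() == 'distance'
#     assert cluster.get_Edge_Node_Pair()[0].similarity_distance == 0.25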
Select appropriate response code grpc.StatusCode.INVALID_ARGUMENT ) # 2. Prepare response response = trolley_pb2.TrolleyContent( total_count=3, total_price=57.5, content=[ # List of gRPC messages trolley_pb2.Item(name="Blue car", unit_price=3.50, count=6), trolley_pb2.Item(name="Red car", unit_price=12.0, count=2), trolley_pb2.Item(name="Grey car", unit_price=12.5, count=1), ] ) # Send response return response # To capture all errors (& prevent failure): except BaseException as error: # noqa context.set_details("system error") context.set_code(grpc.StatusCode.INTERNAL) # Internally log error LOGGER.critical( f"internal error when processing {request}" ) logging.critical(error) logging.error(traceback.format_exc()) # Send response (with error definition) return trolley_pb2.TrolleyContent() # =================================================== # SPIN-UP SERVER # =================================================== def serve(): \"""Main application loop\""" server = grpc.server( futures.ThreadPoolExecutor(max_workers=4) ) trolley_pb2_grpc.add_TrolleyServicer_to_server( Trolley(), server ) server.add_insecure_port(f'[::]:{GRPC_PORT}') server.start() server.wait_for_termination() if __name__ == '__main__': # Start server LOGGER.info("starting server") serve()</code></pre> <p>As you can see, the most important part of the server is the extension of TrolleyServicer. Servicer classes are related to a particular service (interface), and the logic of the server is implemented in their extension. The class that inherits from Servicer has to override all methods (RPC procedures). It is also essential to add some standard error handling (and understand return codes). The function <code>serve</code> is the server's main loop - it instantiates the server on a particular address and port.</p> <p>Now, let's move to the client side. Consider the following example that sends a request and prints the incoming results. It also contains some fundamental error handling on the client side.</p> <pre class="code"><code>import grpc import trolley_pb2_grpc import trolley_pb2 # Open gRPC channel with grpc.insecure_channel( # Address of gRPC server f'localhost:{GRPC_PORT}' ) as channel: # Create gRPC Stub stub = trolley_pb2_grpc.TrolleyStub(channel) # Get content content = stub.GetContent( trolley_pb2.User(name="John", UUID=EXISTING_UUID) ) # Process response: print(f"Count: {content.total_count} \\n" f"Price: {content.total_price}") for item in content.content: print(f"Name: {item.name} \\n" f"Unit price: {item.unit_price} \\n" f"Count: {item.count}") # Now let's handle error request try: content = stub.GetContent( trolley_pb2.User(name="Peter", UUID=WRONG_UUID) ) except grpc.RpcError as e: if e.code() == grpc.StatusCode.INVALID_ARGUMENT: print("Special handling for the code") print(f"error when processing request: {e.details()}")</code></pre> <p>The most important entity is the stub (the client-side equivalent of a servicer). First, the client has to instantiate the stub with the channel (the channel is a parameter of the stub's constructor). Then there is a part that uses the stub to send requests to the server.</p> <p>When you instantiate the stub, the important thing to notice is that no connection is created immediately. That happens later, when a concrete procedure is called. Another thing to bear in mind is the retry policy - there is no default retry policy configured. You have to configure one manually (read the documentation for details) - and it is important to do so. A minimal sketch of such a configuration follows.</p>
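<p>For illustration, here is a minimal sketch of enabling retries through the channel's service config; the policy values below are arbitrary, and the service name may need to be prefixed with the proto package name:</p> <pre class="code"><code>import json
import grpc

# Retry policy passed via the "grpc.service_config" channel option
service_config = json.dumps({
    "methodConfig": [{
        "name": [{"service": "Trolley"}],
        "retryPolicy": {
            "maxAttempts": 4,
            "initialBackoff": "0.1s",
            "maxBackoff": "1s",
            "backoffMultiplier": 2,
            "retryableStatusCodes": ["UNAVAILABLE"],
        },
    }]
})

channel = grpc.insecure_channel(
    f'localhost:{GRPC_PORT}',
    options=[("grpc.service_config", service_config)],
)</code></pre>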
<p>Working with gRPC is often challenging. For example, automatic reloading of the server when the code changes is not supported, which is inconvenient because you need to restart the server manually after every change. Also, many helpful tools like Postman are not available. There is, however, a command-line equivalent of curl (called grpcurl) that can help a lot when debugging.</p> <p>There are ways to use gRPC to serialize data directly to disk - the logic of protocol buffers is the same. However, as the name suggests, gRPC is mainly an RPC framework, and it is most widespread when it comes to communication between services.</p> <h3>Apache Thrift</h3> <p>Facebook developed Thrift in 2008; it became an open-source project in 2010 (known as Apache Thrift). It is quite a popular framework - technically, it is the equivalent of gRPC, and it is up to every developer team to choose their preferred RPC framework.</p> <p>Thrift's interface definition language and RPC protocol are both called Apache Thrift (technically equivalent to gRPC and protocol buffers, respectively). Thrift allows additional configuration options (like nonbinary communication using text serialization), making it a bit more complex in some ways than gRPC. But generally, what is true about gRPC is also true about Thrift.</p> <p>Regarding the example described above, the interface definition would look like this (file <code>trolley.thrift</code>):</p> <pre class="code"><code>struct Item { 1: string name, 2: double unit_price, 3: i32 count } struct TrolleyContent { 1: list<Item> content, 2: i32 total_count, 3: double total_price } struct User { 1: string name, 2: string UUID } service Trolley { TrolleyContent GetContent(1: User user) }</code></pre> <p>You can immediately see many similarities with the protocol buffer file. However, this structure is slightly more complex (including the possibility of returning lists from services).</p> <p>To generate library files, use the command:</p> <pre class="code"><code>thrift -r --gen py trolley.thrift</code></pre> <p>If we continue with our example, the code on the server side will look like this:</p> <pre class="code"><code>import logging import sys # Add path to generated script sys.path.append('gen-py') from trolley import Trolley from trolley.ttypes import Item, TrolleyContent, User from thrift.transport import TSocket from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol from thrift.server import TServer from thrift.Thrift import TException LOGGER = logging.getLogger(__file__) # ============================================ # MAIN SERVER CLASS # ============================================ class TrolleyHandler: \""" Server class for serving Thrift Trolley service requests \""" def GetContent(self, user: User) -> TrolleyContent: # 1. Do some validation of inputs valid_uuids = SELECT_FROM_DB if user.UUID not in valid_uuids: # Prepare error message (if relevant) raise TException("Wrong UUID") # 2.
Prepare response response = TrolleyContent( total_count=3, total_price=57.5, content=[ # List of Thrift messages Item(name="Blue car", unit_price=3.50, count=6), Item(name="Red car", unit_price=12.0, count=2), Item(name="Grey car", unit_price=12.5, count=1), ] ) # Send response return response # ============================================ # SPIN-UP SERVER # ============================================ def serve(): handler = TrolleyHandler() processor = Trolley.Processor(handler) transport = TSocket.TServerSocket(host=THRIFT_HOST, port=THRIFT_PORT) tfactory = TTransport.TBufferedTransportFactory() pfactory = TBinaryProtocol.TBinaryProtocolFactory() server = TServer.TSimpleServer( processor, transport, tfactory, pfactory ) # You could do one of these for a multithreaded server # server = TServer.TThreadedServer( # processor, transport, tfactory, pfactory) # server = TServer.TThreadPoolServer( # processor, transport, tfactory, pfactory) server.serve() if __name__ == '__main__': # Start server LOGGER.info('Starting the server') serve() LOGGER.info('done')</code></pre> <p>Again, even here, the resemblance to gRPC is considerable. The main server class overrides all implemented methods (services) defined in the Thrift file. The handling of data types is more natural (as types behave like real classes). Error handling is also more straightforward - you can raise something inherited from TException and catch it on the client side.</p> <p>Client example:</p> <pre class="code"><code>import sys sys.path.append('gen-py') from trolley import Trolley from trolley.ttypes import Item, TrolleyContent, User from thrift.Thrift import TException from thrift.transport import TSocket from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol def main(): # Make socket transport = TSocket.TSocket(THRIFT_HOST, THRIFT_PORT) # Buffering is critical. Raw sockets are very slow transport = TTransport.TBufferedTransport(transport) # Wrap in a protocol protocol = TBinaryProtocol.TBinaryProtocol(transport) # Create a client to use the protocol encoder client = Trolley.Client(protocol) # Connect! transport.open() # Call request (with context) user = User(name="Some real user", UUID=EXISTING_UUID) trolley_content: TrolleyContent = client.GetContent(user) # Process response print(f"Count: {trolley_content.total_count} \\n" f"Price: {trolley_content.total_price}") for item in trolley_content.content: print(f"Name: {item.name} \\n" f"Unit price: {item.unit_price} \\n" f"Count: {item.count}") # Error handling: try: user = User(name="Some nonexisting user", UUID=WRONG_UUID) client.GetContent(user) except TException as e: print(e) # Close! transport.close() if __name__ == '__main__': # Call client main()</code></pre> <p>The client code is again very similar to the gRPC client; creating the connection is a bit more involved (three more lines), but the rest is almost the same. In addition, error handling is more straightforward.</p> <p>Similarly to gRPC, Apache Thrift can be used to serialize data to disk. As mentioned above, you can easily find ready-made examples of how to do it, and the principle is the same (you still need to define the data structures in Thrift's interface definition language). A minimal sketch of such serialization follows.</p>
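<p>The sketch below reuses the generated <code>Item</code> type together with the standard <code>thrift.TSerialization</code> helpers; the file name is arbitrary:</p> <pre class="code"><code>from thrift.TSerialization import serialize, deserialize
from thrift.protocol import TBinaryProtocol
from trolley.ttypes import Item

factory = TBinaryProtocol.TBinaryProtocolFactory()

# Serialize a struct to bytes and write it to disk
blob = serialize(Item(name="Blue car", unit_price=3.50, count=6), factory)
with open("item.bin", "wb") as fh:
    fh.write(blob)

# Read it back into a fresh instance
with open("item.bin", "rb") as fh:
    restored = deserialize(Item(), fh.read(), factory)</code></pre>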
<h2>Summary</h2> <p>One of the common challenges when storing data or sending it over the network is serializing it effectively (converting it into a technically suitable representation). Binary serialization is one of the essential parts of most Remote Procedure Call (RPC) frameworks. The most popular RPC frameworks are gRPC and Apache Thrift. The principle of an interface definition language is to describe the interface (the methods and data types that are the subject of transfer). The IDL of gRPC is called protocol buffers. Apache Thrift uses an IDL of the same name (the Thrift file). There are, of course, many other similar technologies - like Apache Avro, designed for specific purposes (and extended to support RPC).</p> """ ENTITY = cr.Article( title="Technical possibilities in binary serialization and RPC", url_alias='technical-possibilities-in-binary-serialization-and-rpc', large_image_path="images/protocol_big.jpg", small_image_path="images/protocol_small.jpg", date=datetime.datetime(2021, 10, 24), tags=[cr.Tag('Python', 'python'), cr.Tag('Design', 'design'), cr.Tag('Programming', 'programming'), cr.Tag('Performance', 'performance'), cr.Tag('Essentials', 'essentials')], content=content, lead=lead, description="Remote Procedure Call
# image/controllers.py # Brought to you by We Vote. Be good. # -*- coding: UTF-8 -*- import requests import wevote_functions.admin from .functions import analyze_remote_url, analyze_image_file, analyze_image_in_memory from .models import WeVoteImageManager, WeVoteImage, \ CHOSEN_FAVICON_NAME, CHOSEN_LOGO_NAME, CHOSEN_SOCIAL_SHARE_IMAGE_NAME, \ FACEBOOK_PROFILE_IMAGE_NAME, FACEBOOK_BACKGROUND_IMAGE_NAME, \ TWITTER_PROFILE_IMAGE_NAME, TWITTER_BACKGROUND_IMAGE_NAME, TWITTER_BANNER_IMAGE_NAME, MAPLIGHT_IMAGE_NAME, \ VOTE_SMART_IMAGE_NAME, MASTER_IMAGE, ISSUE_IMAGE_NAME, BALLOTPEDIA_IMAGE_NAME, CAMPAIGNX_PHOTO_IMAGE_NAME, \ LINKEDIN_IMAGE_NAME, WIKIPEDIA_IMAGE_NAME from candidate.models import CandidateManager from config.base import get_environment_variable from django.db.models import Q from import_export_facebook.models import FacebookManager from issue.models import IssueManager from organization.models import OrganizationManager from politician.models import PoliticianManager from position.controllers import reset_all_position_image_details_from_candidate, \ reset_position_for_friends_image_details_from_voter, reset_position_entered_image_details_from_organization, \ update_all_position_details_from_candidate from twitter.functions import retrieve_twitter_user_info from twitter.models import TwitterUserManager from voter.models import VoterManager, VoterDeviceLink, VoterDeviceLinkManager, VoterAddressManager, VoterAddress, Voter from voter_guide.models import VoterGuideManager from wevote_functions.functions import positive_value_exists, convert_to_int logger = wevote_functions.admin.get_logger(__name__) HTTP_OK = 200 # These constants are used for "image_source" which is not a WeVoteImage table value, but gets used in the controller # to set the table values like: kind_of_image_twitter_profile and kind_of_image_facebook_profile # code. "other_source" is a database table value that is not given its own "kind_of_image..."
table boolean TWITTER = "twitter" FACEBOOK = "facebook" MAPLIGHT = "maplight" VOTE_SMART = "vote_smart" BALLOTPEDIA_IMAGE_SOURCE = "ballotpedia" LINKEDIN = "linkedin" WIKIPEDIA = "wikipedia" OTHER_SOURCE = "other_source" # Set "kind_of_image_other_source" to true MAPLIGHT_URL_NOT_FOUND = "maplight url not found" VOTE_SMART_URL_NOT_FOUND = "votesmart url not found" BALLOTPEDIA_URL_NOT_FOUND = "ballotpedia url not found" CAMPAIGNX_PHOTO_URL_NOT_FOUND = "campaignx photo url not found" LINKEDIN_URL_NOT_FOUND = "linkedin url not found" WIKIPEDIA_URL_NOT_FOUND = "wikipedia url not found" OTHER_SOURCE_URL_NOT_FOUND = "other source url not found" FACEBOOK_USER_DOES_NOT_EXIST = "facebook user does not exist" FACEBOOK_URL_NOT_FOUND = "facebook url not found" TWITTER_USER_DOES_NOT_EXIST = "twitter user does not exist" TWITTER_URL_NOT_FOUND = "twitter url not found" IMAGE_ALREADY_CACHED = "image already cached" ALL_KIND_OF_IMAGE = ['kind_of_image_twitter_profile', 'kind_of_image_twitter_background', 'kind_of_image_twitter_banner', 'kind_of_image_facebook_profile', 'kind_of_image_facebook_background', 'kind_of_image_maplight', 'kind_of_image_vote_smart'] # Search for in campaign/controllers.py as well # Facebook shared image: 1200 x 630 # Facebook shared link: 1200 x 628 # Tweet with image in shared link: 1200 x 628 # Tweet with single image: 1200 x 675 (Twitter recommended aspect ratio is 16:9) CAMPAIGN_PHOTO_ORIGINAL_MAX_WIDTH = 1200 CAMPAIGN_PHOTO_ORIGINAL_MAX_HEIGHT = 628 CAMPAIGN_PHOTO_LARGE_MAX_WIDTH = 575 CAMPAIGN_PHOTO_LARGE_MAX_HEIGHT = 301 CAMPAIGN_PHOTO_MEDIUM_MAX_WIDTH = 224 CAMPAIGN_PHOTO_MEDIUM_MAX_HEIGHT = 117 CAMPAIGN_PHOTO_SMALL_MAX_WIDTH = 140 CAMPAIGN_PHOTO_SMALL_MAX_HEIGHT = 73 PROFILE_IMAGE_LARGE_WIDTH = convert_to_int(get_environment_variable("PROFILE_IMAGE_LARGE_WIDTH")) PROFILE_IMAGE_LARGE_HEIGHT = convert_to_int(get_environment_variable("PROFILE_IMAGE_LARGE_HEIGHT")) PROFILE_IMAGE_MEDIUM_WIDTH = convert_to_int(get_environment_variable("PROFILE_IMAGE_MEDIUM_WIDTH")) PROFILE_IMAGE_MEDIUM_HEIGHT = convert_to_int(get_environment_variable("PROFILE_IMAGE_MEDIUM_HEIGHT")) PROFILE_IMAGE_TINY_WIDTH = convert_to_int(get_environment_variable("PROFILE_IMAGE_TINY_WIDTH")) PROFILE_IMAGE_TINY_HEIGHT = convert_to_int(get_environment_variable("PROFILE_IMAGE_TINY_HEIGHT")) ISSUES_IMAGE_LARGE_WIDTH = convert_to_int(get_environment_variable("ISSUES_IMAGE_LARGE_WIDTH")) ISSUES_IMAGE_LARGE_HEIGHT = convert_to_int(get_environment_variable("ISSUES_IMAGE_LARGE_HEIGHT")) ISSUES_IMAGE_MEDIUM_WIDTH = convert_to_int(get_environment_variable("ISSUES_IMAGE_MEDIUM_WIDTH")) ISSUES_IMAGE_MEDIUM_HEIGHT = convert_to_int(get_environment_variable("ISSUES_IMAGE_MEDIUM_HEIGHT")) ISSUES_IMAGE_TINY_WIDTH = convert_to_int(get_environment_variable("ISSUES_IMAGE_TINY_WIDTH")) ISSUES_IMAGE_TINY_HEIGHT = convert_to_int(get_environment_variable("ISSUES_IMAGE_TINY_HEIGHT")) AWS_STORAGE_BUCKET_NAME = get_environment_variable("AWS_STORAGE_BUCKET_NAME") try: SOCIAL_BACKGROUND_IMAGE_WIDTH = convert_to_int(get_environment_variable("SOCIAL_BACKGROUND_IMAGE_WIDTH")) SOCIAL_BACKGROUND_IMAGE_HEIGHT = convert_to_int(get_environment_variable("SOCIAL_BACKGROUND_IMAGE_HEIGHT")) except Exception: # In case not defined in a dev environment, use the default values which come from the Sept 2017 size of the react # image class="organization-banner-image-img" logger.error( "SOCIAL_BACKGROUND_IMAGE_WIDTH and/or SOCIAL_BACKGROUND_IMAGE_HEIGHT not defined in environment_variables.") SOCIAL_BACKGROUND_IMAGE_HEIGHT = 200 # HTML x 
SOCIAL_BACKGROUND_IMAGE_WIDTH = 900 # HTML y def cache_all_kind_of_images_locally_for_all_organizations(): """ Cache all kind of images locally for all organizations :return: """ cache_images_locally_for_all_organizations_results = [] # TODO Update this for organizations # voter_list = Voter.objects.all() # # # If there is a value in twitter_id OR facebook_id, return the voter # image_filters = [] # new_filter = Q(twitter_id__isnull=False) # image_filters.append(new_filter) # new_filter = Q(facebook_id__isnull=False) # image_filters.append(new_filter) # # # Add the first query # final_filters = image_filters.pop() # # # ...and "OR" the remaining items in the list # for item in image_filters: # final_filters |= item # # # voter_list = voter_list.filter(final_filters) # voter_list = voter_list.order_by('-is_admin', '-is_verified_volunteer', 'facebook_email', 'twitter_screen_name', # 'last_name', 'first_name') # voter_list = voter_list[:200] # Limit to 200 for now # # for voter in voter_list: # cache_images_for_one_organization_results = migrate_remote_voter_image_urls_to_local_cache(voter.id) # cache_images_locally_for_all_organizations_results.append(cache_images_for_one_organization_results) return cache_images_locally_for_all_organizations_results def cache_all_kind_of_images_locally_for_all_voters(): """ Cache all kind of images locally for all voters :return: """ cache_images_locally_for_all_voters_results = [] voter_list = Voter.objects.all() # If there is a value in twitter_id OR facebook_id, return the voter image_filters = [] new_filter = Q(twitter_id__isnull=False) image_filters.append(new_filter) new_filter = Q(facebook_id__isnull=False) image_filters.append(new_filter) # Add the first query final_filters = image_filters.pop() # ...and "OR" the remaining items in the list for item in image_filters: final_filters |= item # voter_list = voter_list.filter(final_filters) voter_list = voter_list.order_by('-is_admin', '-is_verified_volunteer', 'facebook_email', 'twitter_screen_name', 'last_name', 'first_name') voter_list = voter_list[:200] # Limit to 200 for now for voter in voter_list: cache_images_for_a_voter_results = cache_voter_master_images(voter.id) cache_images_locally_for_all_voters_results.append(cache_images_for_a_voter_results) return cache_images_locally_for_all_voters_results def cache_image_if_not_cached( google_civic_election_id=0, image_url_https='', voter_we_vote_id=None, candidate_we_vote_id=None, organization_we_vote_id=None, issue_we_vote_id=None, twitter_id=None, twitter_screen_name=None, facebook_user_id=None, maplight_id=None, vote_smart_id=None, is_active_version=False, kind_of_image_twitter_profile=False, kind_of_image_twitter_background=False, kind_of_image_twitter_banner=False, kind_of_image_facebook_profile=False, kind_of_image_facebook_background=False, kind_of_image_maplight=False, kind_of_image_vote_smart=False, kind_of_image_issue=False, kind_of_image_ballotpedia_profile=False, kind_of_image_linkedin_profile=False, kind_of_image_wikipedia_profile=False, kind_of_image_other_source=False, kind_of_image_original=False, facebook_background_image_offset_x=None, facebook_background_image_offset_y=None, other_source=None): """ Check if image is already cached or not. If not then cached it. 
:param google_civic_election_id: :param image_url_https: :param voter_we_vote_id: :param candidate_we_vote_id: :param organization_we_vote_id: :param issue_we_vote_id: :param twitter_id: :param twitter_screen_name: :param facebook_user_id: :param maplight_id: :param vote_smart_id: :param is_active_version: :param kind_of_image_twitter_profile: :param kind_of_image_twitter_background: :param kind_of_image_twitter_banner: :param kind_of_image_facebook_profile: :param kind_of_image_facebook_background: :param kind_of_image_maplight: :param kind_of_image_vote_smart: :param kind_of_image_issue: :param kind_of_image_ballotpedia_profile: :param kind_of_image_linkedin_profile: :param kind_of_image_wikipedia_profile: :param kind_of_image_other_source: :param kind_of_image_original: :param facebook_background_image_offset_x: :param facebook_background_image_offset_y: :param other_source: :return: """ we_vote_image_manager = WeVoteImageManager() cached_we_vote_image_results = we_vote_image_manager.retrieve_recent_cached_we_vote_image( voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id, organization_we_vote_id=organization_we_vote_id, issue_we_vote_id=issue_we_vote_id, kind_of_image_twitter_profile=kind_of_image_twitter_profile, kind_of_image_twitter_background=kind_of_image_twitter_background, kind_of_image_twitter_banner=kind_of_image_twitter_banner, kind_of_image_facebook_profile=kind_of_image_facebook_profile, kind_of_image_facebook_background=kind_of_image_facebook_background, kind_of_image_maplight=kind_of_image_maplight, kind_of_image_vote_smart=kind_of_image_vote_smart, kind_of_image_issue=kind_of_image_issue, kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile, kind_of_image_linkedin_profile=kind_of_image_linkedin_profile, kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile, kind_of_image_other_source=kind_of_image_other_source, kind_of_image_original=kind_of_image_original, is_active_version=True) # If the most recently cached image matches the current one, the image is already cached. # Note the parentheses: the we_vote_image_found check must guard every URL comparison. cached_we_vote_image = cached_we_vote_image_results['we_vote_image'] if cached_we_vote_image_results['we_vote_image_found'] and \ (image_url_https == cached_we_vote_image.twitter_profile_image_url_https or \ image_url_https == cached_we_vote_image.twitter_profile_background_image_url_https or \ image_url_https == cached_we_vote_image.twitter_profile_banner_url_https or \ image_url_https == cached_we_vote_image.facebook_profile_image_url_https or \ image_url_https == cached_we_vote_image.facebook_background_image_url_https or \ image_url_https == cached_we_vote_image.maplight_image_url_https or \ image_url_https == cached_we_vote_image.vote_smart_image_url_https or \ image_url_https == cached_we_vote_image.issue_image_url_https or \ image_url_https == cached_we_vote_image.ballotpedia_profile_image_url or \ image_url_https == cached_we_vote_image.linkedin_profile_image_url or \ image_url_https == cached_we_vote_image.wikipedia_profile_image_url or \ image_url_https == cached_we_vote_image.other_source_image_url): cache_image_results = IMAGE_ALREADY_CACHED else: # Image is not cached, so cache it cache_image_locally_results = cache_image_locally( google_civic_election_id=google_civic_election_id, image_url_https=image_url_https, voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id, organization_we_vote_id=organization_we_vote_id, issue_we_vote_id=issue_we_vote_id, twitter_id=twitter_id, facebook_user_id=facebook_user_id, maplight_id=maplight_id,
vote_smart_id=vote_smart_id, twitter_screen_name=twitter_screen_name, is_active_version=is_active_version, kind_of_image_twitter_profile=kind_of_image_twitter_profile, kind_of_image_twitter_background=kind_of_image_twitter_background, kind_of_image_twitter_banner=kind_of_image_twitter_banner, kind_of_image_facebook_profile=kind_of_image_facebook_profile, kind_of_image_facebook_background=kind_of_image_facebook_background, kind_of_image_maplight=kind_of_image_maplight, kind_of_image_vote_smart=kind_of_image_vote_smart, kind_of_image_issue=kind_of_image_issue, kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile, kind_of_image_linkedin_profile=kind_of_image_linkedin_profile, kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile, kind_of_image_other_source=kind_of_image_other_source, kind_of_image_original=kind_of_image_original, facebook_background_image_offset_x=facebook_background_image_offset_x, facebook_background_image_offset_y=facebook_background_image_offset_y, other_source=other_source, ) cache_image_results = cache_image_locally_results['success'] if cache_image_results: set_active_version_false_results = we_vote_image_manager.set_active_version_false_for_other_images( voter_we_vote_id=voter_we_vote_id, candidate_we_vote_id=candidate_we_vote_id, organization_we_vote_id=organization_we_vote_id, issue_we_vote_id=issue_we_vote_id, image_url_https=image_url_https, kind_of_image_twitter_profile=kind_of_image_twitter_profile, kind_of_image_twitter_background=kind_of_image_twitter_background, kind_of_image_twitter_banner=kind_of_image_twitter_banner, kind_of_image_facebook_profile=kind_of_image_facebook_profile, kind_of_image_facebook_background=kind_of_image_facebook_background, kind_of_image_maplight=kind_of_image_maplight, kind_of_image_vote_smart=kind_of_image_vote_smart, kind_of_image_issue=kind_of_image_issue, kind_of_image_ballotpedia_profile=kind_of_image_ballotpedia_profile, kind_of_image_linkedin_profile=kind_of_image_linkedin_profile, kind_of_image_wikipedia_profile=kind_of_image_wikipedia_profile, kind_of_image_other_source=kind_of_image_other_source,) return cache_image_results def cache_organization_master_images(organization_we_vote_id): """ Cache all kind of master images for an organization such as profile, background :param organization_we_vote_id: :return: """ cache_all_kind_of_images_results = { 'organization_we_vote_id': "", 'cached_twitter_profile_image': False, 'cached_twitter_background_image': False, 'cached_twitter_banner_image': False, 'cached_facebook_profile_image': False, 'cached_facebook_background_image': False } google_civic_election_id = 0 twitter_id = None organization_manager = OrganizationManager() organization_results = organization_manager.retrieve_organization_from_we_vote_id(organization_we_vote_id) if not organization_results['organization_found']: return cache_all_kind_of_images_results organization = organization_results['organization'] organization_we_vote_id = organization.we_vote_id if positive_value_exists(organization_we_vote_id): cache_all_kind_of_images_results['organization_we_vote_id'] = organization_we_vote_id else: return cache_all_kind_of_images_results twitter_user_manager = TwitterUserManager() twitter_screen_name = '' twitter_link_to_organization_results = \ twitter_user_manager.retrieve_twitter_link_to_organization_from_organization_we_vote_id(organization_we_vote_id) if twitter_link_to_organization_results['twitter_link_to_organization_found']: twitter_link_to_organization = 
twitter_link_to_organization_results['twitter_link_to_organization'] twitter_id = twitter_link_to_organization.twitter_id twitter_screen_name = twitter_link_to_organization.fetch_twitter_handle_locally_or_remotely() if not positive_value_exists(twitter_id): cache_all_kind_of_images_results = { 'organization_we_vote_id': organization_we_vote_id, 'organization': organization, 'cached_twitter_profile_image': TWITTER_USER_DOES_NOT_EXIST, 'cached_twitter_background_image': TWITTER_USER_DOES_NOT_EXIST, 'cached_twitter_banner_image': TWITTER_USER_DOES_NOT_EXIST, } return cache_all_kind_of_images_results # Retrieve latest twitter image urls from Twitter latest_image_urls_results = retrieve_image_urls_from_twitter(twitter_id) twitter_profile_image_url_https = latest_image_urls_results['latest_twitter_profile_image_url'] twitter_profile_background_image_url_https = latest_image_urls_results['latest_twitter_background_image_url'] twitter_profile_banner_url_https = latest_image_urls_results['latest_twitter_banner_image_url'] # Cache all images if not already cached if not twitter_profile_image_url_https: cache_all_kind_of_images_results['cached_twitter_profile_image'] = TWITTER_URL_NOT_FOUND else: cache_all_kind_of_images_results['cached_twitter_profile_image'] = cache_image_if_not_cached( google_civic_election_id, twitter_profile_image_url_https, organization_we_vote_id=organization_we_vote_id, twitter_id=twitter_id, twitter_screen_name=twitter_screen_name, is_active_version=True, kind_of_image_twitter_profile=True, kind_of_image_original=True) if not twitter_profile_background_image_url_https: cache_all_kind_of_images_results['cached_twitter_background_image'] = TWITTER_URL_NOT_FOUND else: cache_all_kind_of_images_results['cached_twitter_background_image'] = cache_image_if_not_cached( google_civic_election_id, twitter_profile_background_image_url_https, organization_we_vote_id=organization_we_vote_id, twitter_id=twitter_id, twitter_screen_name=twitter_screen_name, is_active_version=True, kind_of_image_twitter_background=True, kind_of_image_original=True) if not twitter_profile_banner_url_https: cache_all_kind_of_images_results['cached_twitter_banner_image'] = TWITTER_URL_NOT_FOUND else: cache_all_kind_of_images_results['cached_twitter_banner_image'] = cache_image_if_not_cached( google_civic_election_id, twitter_profile_banner_url_https, organization_we_vote_id=organization_we_vote_id, twitter_id=twitter_id, twitter_screen_name=twitter_screen_name, is_active_version=True, kind_of_image_twitter_banner=True, kind_of_image_original=True) return cache_all_kind_of_images_results def cache_voter_master_images(voter_id): """ Cache all kind of images locally for a voter such as profile, background :param voter_id: :return: """ cache_all_kind_of_images_results = { 'voter_id': voter_id, 'voter_we_vote_id': "", 'cached_twitter_profile_image': False, 'cached_twitter_background_image': False, 'cached_twitter_banner_image': False, 'cached_facebook_profile_image': False, 'cached_facebook_background_image': False } google_civic_election_id = 0 twitter_id = None facebook_id = None voter_address_manager = VoterAddressManager() voter_manager = VoterManager() voter_device_link_manager = VoterDeviceLinkManager() voter_results = voter_manager.retrieve_voter_by_id(voter_id) if not voter_results['voter_found']: return cache_all_kind_of_images_results voter = voter_results['voter'] if positive_value_exists(voter.we_vote_id): cache_all_kind_of_images_results['voter_we_vote_id'] = voter.we_vote_id # DALE 2018-06-19 I don't see 
why we need a google_civic_election_id for storing a voter's photos voter_device_link_results = voter_device_link_manager.retrieve_voter_device_link(0, voter_id=voter_id) if voter_device_link_results['success']: voter_device_link = voter_device_link_results['voter_device_link'] else: voter_device_link = VoterDeviceLink() voter_address_results = voter_address_manager.retrieve_address(0, voter_id) if voter_address_results['voter_address_found']: voter_address = voter_address_results['voter_address'] else: voter_address = VoterAddress() from ballot.controllers import choose_election_from_existing_data results = choose_election_from_existing_data(voter_device_link, 0, voter_address) google_civic_election_id = results['google_civic_election_id'] else: return cache_all_kind_of_images_results # DALE NOTE 2017-04-23 I don't think we want to use the twitter_id stored in the voter table # if positive_value_exists(voter.twitter_id): # twitter_id = voter.twitter_id # else: twitter_user_manager = TwitterUserManager() twitter_screen_name = '' twitter_link_to_voter_results = twitter_user_manager.retrieve_twitter_link_to_voter_from_voter_we_vote_id( voter.we_vote_id, read_only=True) if twitter_link_to_voter_results['twitter_link_to_voter_found']: twitter_link_to_voter = twitter_link_to_voter_results['twitter_link_to_voter'] twitter_id = twitter_link_to_voter.twitter_id twitter_screen_name = twitter_link_to_voter.fetch_twitter_handle_locally_or_remotely() # DALE NOTE 2017-04-23 I don't think we want to use the facebook_id stored in the voter table # if positive_value_exists(voter.facebook_id): # facebook_id = voter.facebook_id # else: facebook_manager = FacebookManager() facebook_link_to_voter_results = facebook_manager.retrieve_facebook_link_to_voter_from_voter_we_vote_id( voter.we_vote_id) if facebook_link_to_voter_results['facebook_link_to_voter_found']: facebook_id = facebook_link_to_voter_results['facebook_link_to_voter'].facebook_user_id if
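# Every branch in the caching helpers above follows the same contract: look up the
# linked external account, bail out early with a status string when the id is
# missing, and otherwise hand each image URL to the cache call. A minimal
# standalone sketch of that contract; `cache_one_image` and `cache_fn` are
# illustrative stand-ins, not part of the original module, with `cache_fn`
# replacing `cache_image_if_not_cached` and its many keyword arguments.
TWITTER_USER_DOES_NOT_EXIST = 'TWITTER_USER_DOES_NOT_EXIST'
TWITTER_URL_NOT_FOUND = 'TWITTER_URL_NOT_FOUND'

def cache_one_image(account_id, image_url, cache_fn):
    """Return a status string on failure, or the cache call's result on success."""
    if not account_id:
        # No linked account: nothing to fetch.
        return TWITTER_USER_DOES_NOT_EXIST
    if not image_url:
        # Account exists but exposes no image in this slot.
        return TWITTER_URL_NOT_FOUND
    return cache_fn(image_url)

results = {'cached_twitter_profile_image': cache_one_image(123, None, print)}
assert results['cached_twitter_profile_image'] == TWITTER_URL_NOT_FOUND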
word[4] != "P" and word[4] != "p" and word[5] != "P" and word[5] != "p" : print("\nWrong!\n") numberOfErrors = numberOfErrors + 1 wrongChars = wrongChars + "p" + ", " if guessChar == "Q" or guessChar == "q" : if word[1] == "Q" or word[1] == "q" : toGuess = toGuess[:1] + "q" + toGuess[2:] if word[2] == "Q" or word[2] == "q" : toGuess = toGuess[:2] + "q" + toGuess[3:] if word[3] == "Q" or word[3] == "q" : toGuess = toGuess[:3] + "q" + toGuess[4:] if word[4] == "Q" or word[4] == "q" : toGuess = toGuess[:4] + "q" + toGuess[5:] if word[5] == "Q" or word[5] == "q" : toGuess = toGuess[:5] + "q" + toGuess[6:] if word[1] != "Q" and word[1] != "q" and word[2] != "Q" and word[2] != "q" and word[3] != "Q" and word[3] != "q" and word[4] != "Q" and word[4] != "q" and word[5] != "Q" and word[5] != "q" : print("\nWrong!\n") numberOfErrors = numberOfErrors + 1 wrongChars = wrongChars + "q" + ", " if guessChar == "R" or guessChar == "r" : if word[1] == "R" or word[1] == "r" : toGuess = toGuess[:1] + "r" + toGuess[2:] if word[2] == "R" or word[2] == "r" : toGuess = toGuess[:2] + "r" + toGuess[3:] if word[3] == "R" or word[3] == "r" : toGuess = toGuess[:3] + "r" + toGuess[4:] if word[4] == "R" or word[4] == "r" : toGuess = toGuess[:4] + "r" + toGuess[5:] if word[5] == "R" or word[5] == "r" : toGuess = toGuess[:5] + "r" + toGuess[6:] if word[1] != "R" and word[1] != "r" and word[2] != "R" and word[2] != "r" and word[3] != "R" and word[3] != "r" and word[4] != "R" and word[4] != "r" and word[5] != "R" and word[5] != "r" : print("\nWrong!\n") numberOfErrors = numberOfErrors + 1 wrongChars = wrongChars + "r" + ", " if guessChar == "S" or guessChar == "s" : if word[1] == "S" or word[1] == "s" : toGuess = toGuess[:1] + "s" + toGuess[2:] if word[2] == "S" or word[2] == "s" : toGuess = toGuess[:2] + "s" + toGuess[3:] if word[3] == "S" or word[3] == "s" : toGuess = toGuess[:3] + "s" + toGuess[4:] if word[4] == "S" or word[4] == "s" : toGuess = toGuess[:4] + "s" + toGuess[5:] if word[5] == "S" or word[5] == "s" : toGuess = toGuess[:5] + "s" + toGuess[6:] if word[1] != "S" and word[1] != "s" and word[2] != "S" and word[2] != "s" and word[3] != "S" and word[3] != "s" and word[4] != "S" and word[4] != "s" and word[5] != "S" and word[5] != "s" : print("\nWrong!\n") numberOfErrors = numberOfErrors + 1 wrongChars = wrongChars + "s" + ", " if guessChar == "T" or guessChar == "t" : if word[1] == "T" or word[1] == "t" : toGuess = toGuess[:1] + "t" + toGuess[2:] if word[2] == "T" or word[2] == "t" : toGuess = toGuess[:2] + "t" + toGuess[3:] if word[3] == "T" or word[3] == "t" : toGuess = toGuess[:3] + "t" + toGuess[4:] if word[4] == "T" or word[4] == "t" : toGuess = toGuess[:4] + "t" + toGuess[5:] if word[5] == "T" or word[5] == "t" : toGuess = toGuess[:5] + "t" + toGuess[6:] if word[1] != "T" and word[1] != "t" and word[2] != "T" and word[2] != "t" and word[3] != "T" and word[3] != "t" and word[4] != "T" and word[4] != "t" and word[5] != "T" and word[5] != "t" : print("\nWrong!\n") numberOfErrors = numberOfErrors + 1 wrongChars = wrongChars + "t" + ", " if guessChar == "U" or guessChar == "u" : if word[1] == "U" or word[1] == "u" : toGuess = toGuess[:1] + "u" + toGuess[2:] if word[2] == "U" or word[2] == "u" : toGuess = toGuess[:2] + "u" + toGuess[3:] if word[3] == "U" or word[3] == "u" : toGuess = toGuess[:3] + "u" + toGuess[4:] if word[4] == "U" or word[4] == "u" : toGuess = toGuess[:4] + "u" + toGuess[5:] if word[5] == "U" or word[5] == "u" : toGuess = toGuess[:5] + "u" + toGuess[6:] if word[1] != "U" and word[1] != "u" and 
word[2] != "U" and word[2] != "u" and word[3] != "U" and word[3] != "u" and word[4] != "U" and word[4] != "u" and word[5] != "U" and word[5] != "u" : print("\nWrong!\n") numberOfErrors = numberOfErrors + 1 wrongChars = wrongChars + "u" + ", " if guessChar == "V" or guessChar == "v" : if word[1] == "V" or word[1] == "v" : toGuess = toGuess[:1] + "v" + toGuess[2:] if word[2] == "V" or word[2] == "v" : toGuess = toGuess[:2] + "v" + toGuess[3:] if word[3] == "V" or word[3] == "v" : toGuess = toGuess[:3] + "v" + toGuess[4:] if word[4] == "V" or word[4] == "v" : toGuess = toGuess[:4] + "v" + toGuess[5:] if word[5] == "V" or word[5] == "v" : toGuess = toGuess[:5] + "v" + toGuess[6:] if word[1] != "V" and word[1] != "v" and word[2] != "V" and word[2] != "v" and word[3] != "V" and word[3] != "v" and word[4] != "V" and word[4] != "v" and word[5] != "V" and word[5] != "v" : print("\nWrong!\n") numberOfErrors = numberOfErrors + 1 wrongChars = wrongChars + "v" + ", " if guessChar == "W" or guessChar == "w" : if word[1] == "W" or word[1] == "w" : toGuess = toGuess[:1] + "w" + toGuess[2:] if word[2] == "W" or word[2] == "w" : toGuess = toGuess[:2] + "w" + toGuess[3:] if word[3] == "W" or word[3] == "w" : toGuess = toGuess[:3] + "w" + toGuess[4:] if word[4] == "W" or word[4] == "w" : toGuess = toGuess[:4] + "w" + toGuess[5:] if word[5] == "W" or word[5] == "w" : toGuess = toGuess[:5] + "w" + toGuess[6:] if word[1] != "W" and word[1] != "w" and word[2] != "W" and word[2] != "w" and word[3] != "W" and word[3] != "w" and word[4] != "W" and word[4] != "w" and word[5] != "W" and word[5] != "w" : print("\nWrong!\n") numberOfErrors = numberOfErrors + 1 wrongChars = wrongChars + "w" + ", " if guessChar == "X" or guessChar == "x" : if word[1] == "X" or word[1] == "x" : toGuess = toGuess[:1] + "x" + toGuess[2:] if word[2] == "X" or word[2] == "x" : toGuess = toGuess[:2] + "x" + toGuess[3:] if word[3] == "X" or word[3] == "x" : toGuess = toGuess[:3] + "x" + toGuess[4:] if word[4] == "X" or word[4] == "x" : toGuess = toGuess[:4] + "x" + toGuess[5:] if word[5] == "X" or word[5] == "x" : toGuess = toGuess[:5] + "x" + toGuess[6:] if word[1] != "X" and word[1] != "x" and word[2] != "X" and word[2] != "x" and word[3] != "X" and word[3] != "x" and word[4] != "X" and word[4] != "x" and word[5] != "X" and word[5] != "x" : print("\nWrong!\n") numberOfErrors = numberOfErrors + 1 wrongChars = wrongChars + "x" + ", " if guessChar == "Y" or guessChar == "y" : if word[1] == "Y" or word[1] == "y" : toGuess = toGuess[:1] + "y" + toGuess[2:] if word[2] == "Y" or word[2] == "y" : toGuess = toGuess[:2] + "y" + toGuess[3:] if word[3] == "Y" or word[3] == "y" : toGuess = toGuess[:3] + "y" + toGuess[4:] if word[4] == "Y" or word[4] == "y" : toGuess = toGuess[:4] + "y" + toGuess[5:] if word[5] == "Y" or word[5] == "y" : toGuess = toGuess[:5] + "y" + toGuess[6:] if word[1] != "Y" and word[1] != "y" and word[2] != "Y" and word[2] != "y" and word[3] != "Y" and word[3] != "y" and word[4] != "Y" and word[4] != "y" and word[5] != "Y" and word[5] != "y" : print("\nWrong!\n") numberOfErrors = numberOfErrors + 1 wrongChars = wrongChars + "y" + ", " if guessChar == "Z" or guessChar == "z" : if word[1] == "Z" or word[1] == "z" : toGuess = toGuess[:1] + "z" + toGuess[2:] if word[2] == "Z" or word[2] == "z" : toGuess = toGuess[:2] + "z" + toGuess[3:] if word[3] == "Z" or word[3] == "z" : toGuess = toGuess[:3] + "z" + toGuess[4:] if word[4] == "Z" or word[4] == "z" : toGuess = toGuess[:4] + "z" + toGuess[5:] if word[5] == "Z" or word[5] == "z" : toGuess 
= toGuess[:5] + "z" + toGuess[6:] if word[1] != "Z" and word[1] != "z" and word[2] != "Z" and word[2] !=
due to being an optional var when calling pass_vars. if os.path.exists('/etc/freebsd-update.conf'): env.update(CFLAGS='-I/usr/local/include/') env.update(pass_vars(required=required, optional=optional)) return env def pass_vars(required, optional): # type: (t.Collection[str], t.Collection[str]) -> t.Dict[str, str] """Return a filtered dictionary of environment variables based on the current environment.""" env = {} for name in required: if name not in os.environ: raise MissingEnvironmentVariable(name) env[name] = os.environ[name] for name in optional: if name not in os.environ: continue env[name] = os.environ[name] return env def remove_tree(path): # type: (str) -> None """Remove the specified directory, siliently continuing if the directory does not exist.""" try: shutil.rmtree(to_bytes(path)) except OSError as ex: if ex.errno != errno.ENOENT: raise def is_binary_file(path): # type: (str) -> bool """Return True if the specified file is a binary file, otherwise return False.""" assume_text = { '.cfg', '.conf', '.crt', '.cs', '.css', '.html', '.ini', '.j2', '.js', '.json', '.md', '.pem', '.ps1', '.psm1', '.py', '.rst', '.sh', '.txt', '.xml', '.yaml', '.yml', } assume_binary = { '.bin', '.eot', '.gz', '.ico', '.iso', '.jpg', '.otf', '.p12', '.png', '.pyc', '.rpm', '.ttf', '.woff', '.woff2', '.zip', } ext = os.path.splitext(path)[1] if ext in assume_text: return False if ext in assume_binary: return True with open_binary_file(path) as path_fd: # noinspection PyTypeChecker return b'\0' in path_fd.read(4096) def generate_name(length=8): # type: (int) -> str """Generate and return a random name.""" return ''.join(random.choice(string.ascii_letters + string.digits) for _idx in range(length)) def generate_password(): # type: () -> str """Generate and return random password.""" chars = [ string.ascii_letters, string.digits, string.ascii_letters, string.digits, '-', ] * 4 password = ''.join([random.choice(char) for char in chars[:-1]]) display.sensitive.add(password) return password class Display: """Manages color console output.""" clear = '\033[0m' red = '\033[31m' green = '\033[32m' yellow = '\033[33m' blue = '\033[34m' purple = '\033[35m' cyan = '\033[36m' verbosity_colors = { 0: None, 1: green, 2: blue, 3: cyan, } def __init__(self): self.verbosity = 0 self.color = sys.stdout.isatty() self.warnings = [] self.warnings_unique = set() self.info_stderr = False self.rows = 0 self.columns = 0 self.truncate = 0 self.redact = True self.sensitive = set() if os.isatty(0): self.rows, self.columns = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[:2] def __warning(self, message): # type: (str) -> None """Internal implementation for displaying a warning message.""" self.print_message('WARNING: %s' % message, color=self.purple, fd=sys.stderr) def review_warnings(self): # type: () -> None """Review all warnings which previously occurred.""" if not self.warnings: return self.__warning('Reviewing previous %d warning(s):' % len(self.warnings)) for warning in self.warnings: self.__warning(warning) def warning(self, message, unique=False, verbosity=0): # type: (str, bool, int) -> None """Display a warning level message.""" if verbosity > self.verbosity: return if unique: if message in self.warnings_unique: return self.warnings_unique.add(message) self.__warning(message) self.warnings.append(message) def notice(self, message): # type: (str) -> None """Display a notice level message.""" self.print_message('NOTICE: %s' % message, color=self.purple, fd=sys.stderr) def error(self, message): # type: 
(str) -> None """Display an error level message.""" self.print_message('ERROR: %s' % message, color=self.red, fd=sys.stderr) def info(self, message, verbosity=0, truncate=False): # type: (str, int, bool) -> None """Display an info level message.""" if self.verbosity >= verbosity: color = self.verbosity_colors.get(verbosity, self.yellow) self.print_message(message, color=color, fd=sys.stderr if self.info_stderr else sys.stdout, truncate=truncate) def print_message( # pylint: disable=locally-disabled, invalid-name self, message, # type: str color=None, # type: t.Optional[str] fd=sys.stdout, # type: t.TextIO truncate=False, # type: bool ): # type: (...) -> None """Display a message.""" if self.redact and self.sensitive: for item in self.sensitive: if not item: continue message = message.replace(item, '*' * len(item)) if truncate: if len(message) > self.truncate > 5: message = message[:self.truncate - 5] + ' ...' if color and self.color: # convert color resets in message to desired color message = message.replace(self.clear, color) message = '%s%s%s' % (color, message, self.clear) if sys.version_info[0] == 2: message = to_bytes(message) print(message, file=fd) fd.flush() class ApplicationError(Exception): """General application error.""" class ApplicationWarning(Exception): """General application warning which interrupts normal program flow.""" class SubprocessError(ApplicationError): """Error resulting from failed subprocess execution.""" def __init__( self, cmd, # type: t.List[str] status=0, # type: int stdout=None, # type: t.Optional[str] stderr=None, # type: t.Optional[str] runtime=None, # type: t.Optional[float] error_callback=None, # type: t.Optional[t.Callable[[SubprocessError], None]] ): # type: (...) -> None message = 'Command "%s" returned exit status %s.\n' % (' '.join(shlex.quote(c) for c in cmd), status) if stderr: message += '>>> Standard Error\n' message += '%s%s\n' % (stderr.strip(), Display.clear) if stdout: message += '>>> Standard Output\n' message += '%s%s\n' % (stdout.strip(), Display.clear) self.cmd = cmd self.message = message self.status = status self.stdout = stdout self.stderr = stderr self.runtime = runtime if error_callback: error_callback(self) self.message = self.message.strip() super().__init__(self.message) class MissingEnvironmentVariable(ApplicationError): """Error caused by missing environment variable.""" def __init__(self, name): # type: (str) -> None super().__init__('Missing environment variable: %s' % name) self.name = name def retry(func, ex_type=SubprocessError, sleep=10, attempts=10): """Retry the specified function on failure.""" for dummy in range(1, attempts): try: return func() except ex_type: time.sleep(sleep) return func() def parse_to_list_of_dict(pattern, value): # type: (str, str) -> t.List[t.Dict[str, str]] """Parse lines from the given value using the specified pattern and return the extracted list of key/value pair dictionaries.""" matched = [] unmatched = [] for line in value.splitlines(): match = re.search(pattern, line) if match: matched.append(match.groupdict()) else: unmatched.append(line) if unmatched: raise Exception('Pattern "%s" did not match values:\n%s' % (pattern, '\n'.join(unmatched))) return matched def get_subclasses(class_type): # type: (t.Type[C]) -> t.List[t.Type[C]] """Returns a list of types that are concrete subclasses of the given type.""" subclasses = set() # type: t.Set[t.Type[C]] queue = [class_type] # type: t.List[t.Type[C]] while queue: parent = queue.pop() for child in parent.__subclasses__(): if child not in 
subclasses: if not inspect.isabstract(child): subclasses.add(child) queue.append(child) return sorted(subclasses, key=lambda sc: sc.__name__) def is_subdir(candidate_path, path): # type: (str, str) -> bool """Returns true if candidate_path is path or a subdirectory of path.""" if not path.endswith(os.path.sep): path += os.path.sep if not candidate_path.endswith(os.path.sep): candidate_path += os.path.sep return candidate_path.startswith(path) def paths_to_dirs(paths): # type: (t.List[str]) -> t.List[str] """Returns a list of directories extracted from the given list of paths.""" dir_names = set() for path in paths: while True: path = os.path.dirname(path) if not path or path == os.path.sep: break dir_names.add(path + os.path.sep) return sorted(dir_names) def str_to_version(version): # type: (str) -> t.Tuple[int, ...] """Return a version tuple from a version string.""" return tuple(int(n) for n in version.split('.')) def version_to_str(version): # type: (t.Tuple[int, ...]) -> str """Return a version string from a version tuple.""" return '.'.join(str(n) for n in version) def sorted_versions(versions): # type: (t.List[str]) -> t.List[str] """Return a sorted copy of the given list of versions.""" return [version_to_str(version) for version in sorted(str_to_version(version) for version in versions)] def import_plugins(directory, root=None): # type: (str, t.Optional[str]) -> None """ Import plugins from the given directory relative to the given root. If the root is not provided, the 'lib' directory for the test runner will be used. """ if root is None: root = os.path.dirname(__file__) path = os.path.join(root, directory) package = __name__.rsplit('.', 1)[0] prefix = '%s.%s.' % (package, directory.replace(os.path.sep, '.')) for (_module_loader, name, _ispkg) in pkgutil.iter_modules([path], prefix=prefix): module_path = os.path.join(root, name[len(package) + 1:].replace('.', os.path.sep) + '.py') load_module(module_path, name) def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None """ Load plugins of the specified type and track them in the specified database. Only plugins which have already been imported will be loaded. 
""" plugins = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) # type: t.Dict[str, t.Type[C]] for plugin in plugins: database[plugin] = plugins[plugin] def load_module(path, name): # type: (str, str) -> None """Load a Python module using the given name and path.""" if name in sys.modules: return if sys.version_info >= (3, 4): import importlib.util spec = importlib.util.spec_from_file_location(name, path) module = importlib.util.module_from_spec(spec) # noinspection PyUnresolvedReferences spec.loader.exec_module(module) sys.modules[name] = module else: # noinspection PyDeprecation import imp # pylint: disable=deprecated-module # load_source (and thus load_module) require a file opened with `open` in text mode with open(to_bytes(path)) as module_file: # noinspection PyDeprecation imp.load_module(name, module_file, path, ('.py', 'r', imp.PY_SOURCE)) def sanitize_host_name(name): """Return a sanitized version of the given name, suitable for use as a hostname.""" return re.sub('[^A-Za-z0-9]+', '-', name)[:63].strip('-') @cache def get_host_ip(): """Return the host's IP address.""" with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock: sock.connect(('10.255.255.255', 22)) host_ip = get_host_ip.ip = sock.getsockname()[0] display.info('Detected host IP: %s' % host_ip, verbosity=1) return host_ip def get_generic_type(base_type, generic_base_type): # type: (t.Type, t.Type[TType]) -> t.Optional[t.Type[TType]] """Return the generic type arg derived from the generic_base_type type that is associated
'OPTAA' and method == 'Streamed': uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample' var_list[0].name = 'time' var_list[0].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' #CSPP Data below elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered': uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument' var_list[0].name = 'time' var_list[1].name = 'seawater_scattering_coefficient' var_list[2].name = 'fluorometric_chlorophyll_a' var_list[3].name = 'fluorometric_cdom' var_list[4].name = 'total_volume_scattering_coefficient' var_list[5].name = 'optical_backscatter' var_list[6].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[6].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'm-1' var_list[2].units = 'ug/L' var_list[3].units = 'ppb' var_list[4].units = 'm-1 sr-1' var_list[5].units = 'm-1' var_list[6].units = 'dbar' elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP': uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample' var_list[0].name = 'time' var_list[1].name = 'seawater_scattering_coefficient' var_list[2].name = 'fluorometric_chlorophyll_a' var_list[3].name = 'fluorometric_cdom' var_list[4].name = 'total_volume_scattering_coefficient' var_list[5].name = 'optical_backscatter' var_list[6].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[6].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'm-1' var_list[2].units = 'ug/L' var_list[3].units = 'ppb' var_list[4].units = 'm-1 sr-1' var_list[5].units = 'm-1' var_list[6].units = 'dbar' elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered': uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument' var_list[0].name = 'time' var_list[1].name = 'seawater_scattering_coefficient' var_list[2].name = 'fluorometric_chlorophyll_a' var_list[3].name = 'fluorometric_cdom' var_list[4].name = 'total_volume_scattering_coefficient' var_list[5].name = 'optical_backscatter' var_list[6].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[6].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'm-1' var_list[2].units = 'ug/L' var_list[3].units = 'ppb' var_list[4].units = 'm-1 sr-1' var_list[5].units = 'm-1' var_list[6].units = 'dbar' elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP': uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample' var_list[0].name = 'time' var_list[1].name = 'seawater_scattering_coefficient' var_list[2].name = 'fluorometric_chlorophyll_a' var_list[3].name = 'fluorometric_cdom' var_list[4].name = 'total_volume_scattering_coefficient' var_list[5].name = 'optical_backscatter' var_list[6].name = 'int_ctd_pressure' 
var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[6].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'm-1' var_list[2].units = 'ug/L' var_list[3].units = 'ppb' var_list[4].units = 'm-1 sr-1' var_list[5].units = 'm-1' var_list[6].units = 'dbar' elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered': uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument' var_list[0].name = 'time' var_list[1].name = 'dissolved_oxygen' var_list[2].name = 'estimated_oxygen_concentration' var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen' var_list[4].name = 'optode_temperature' var_list[5].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'umol/kg' var_list[2].units = 'umol/L' var_list[3].units = 'umol/L' var_list[4].units = 'degC' var_list[5].units = 'dbar' elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP': uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered' var_list[0].name = 'time' var_list[1].name = 'dissolved_oxygen' var_list[2].name = 'estimated_oxygen_concentration' var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen' var_list[4].name = 'optode_temperature' var_list[5].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'umol/kg' var_list[2].units = 'umol/L' var_list[3].units = 'umol/L' var_list[4].units = 'degC' var_list[5].units = 'dbar' elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered': uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument' var_list[0].name = 'time' var_list[1].name = 'dissolved_oxygen' var_list[2].name = 'estimated_oxygen_concentration' var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen' var_list[4].name = 'optode_temperature' var_list[5].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'umol/kg' var_list[2].units = 'umol/L' var_list[3].units = 'umol/L' var_list[4].units = 'degC' var_list[5].units = 'dbar' elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP': uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered' var_list[0].name = 'time' var_list[1].name = 'dissolved_oxygen' var_list[2].name = 'estimated_oxygen_concentration' var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen' var_list[4].name = 'optode_temperature' var_list[5].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) 
var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'umol/kg' var_list[2].units = 'umol/L' var_list[3].units = 'umol/L' var_list[4].units = 'degC' var_list[5].units = 'dbar' elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered': uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument' var_list[0].name = 'time' var_list[1].name = 'temperature' var_list[2].name = 'salinity' var_list[3].name = 'density' var_list[4].name = 'pressure' var_list[5].name = 'conductivity' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'degC' var_list[2].units = 'unitless' var_list[3].units = 'kg/m3' var_list[4].units = 'dbar' var_list[5].units = 'S/m' elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP': uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered' var_list[0].name = 'time' var_list[1].name = 'temperature' var_list[2].name = 'salinity' var_list[3].name = 'density' var_list[4].name = 'pressure' var_list[5].name = 'conductivity' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'degC' var_list[2].units = 'unitless' var_list[3].units = 'kg/m3' var_list[4].units = 'dbar' var_list[5].units = 'S/m' elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered': uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument' var_list[0].name = 'time' var_list[1].name = 'temperature' var_list[2].name = 'salinity' var_list[3].name = 'density' var_list[4].name = 'pressure' var_list[5].name = 'conductivity' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'degC' var_list[2].units = 'unitless' var_list[3].units = 'kg/m3' var_list[4].units = 'dbar' var_list[5].units = 'S/m' elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP': uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered' var_list[0].name = 'time' var_list[1].name = 'temperature' var_list[2].name = 'salinity' var_list[3].name = 'density' var_list[4].name = 'pressure' var_list[5].name = 'conductivity' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[4].data = np.array([]) var_list[5].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'degC' var_list[2].units = 'unitless' var_list[3].units = 'kg/m3' var_list[4].units = 'dbar' var_list[5].units = 'S/m' elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered': uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument' 
var_list[0].name = 'time' var_list[1].name = 'parad_j_par_counts_output' var_list[2].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'umol photons m-2 s-1' var_list[2].units = 'dbar' elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP': uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered' var_list[0].name = 'time' var_list[1].name = 'parad_j_par_counts_output' var_list[2].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'umol photons m-2 s-1' var_list[2].units = 'dbar' elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered': uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument' var_list[0].name = 'time' var_list[1].name = 'parad_j_par_counts_output' var_list[2].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'umol photons m-2 s-1' var_list[2].units = 'dbar' elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP': uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered' var_list[0].name = 'time' var_list[1].name = 'parad_j_par_counts_output' var_list[2].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'umol photons m-2 s-1' var_list[2].units = 'dbar' elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP': uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered' var_list[0].name = 'time' var_list[1].name = 'salinity_corrected_nitrate' var_list[2].name = 'nitrate_concentration' var_list[3].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'umol/L' var_list[2].units = 'umol/L' var_list[3].units = 'dbar' elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP': uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered' var_list[0].name = 'time' var_list[1].name = 'salinity_corrected_nitrate' var_list[2].name = 'nitrate_concentration' var_list[3].name = 'int_ctd_pressure' var_list[0].data = np.array([]) var_list[1].data = np.array([]) var_list[2].data = np.array([]) var_list[3].data = np.array([]) var_list[0].units = 'seconds since 1900-01-01' var_list[1].units = 'umol/L' var_list[2].units = 'umol/L' var_list[3].units = 'dbar' elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered': uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
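# Each elif branch above differs only in its (platform, node, instrument class,
# method) key and the dataset path; the variable names and units for a given
# instrument class repeat verbatim. A sketch of holding the same information in
# a lookup table instead; this structure is hypothetical, not part of the
# original module, and only two of the FLORT keys are shown.
import numpy as np

FLORT_VARS = [
    ('time', 'seconds since 1900-01-01'),
    ('seawater_scattering_coefficient', 'm-1'),
    ('fluorometric_chlorophyll_a', 'ug/L'),
    ('fluorometric_cdom', 'ppb'),
    ('total_volume_scattering_coefficient', 'm-1 sr-1'),
    ('optical_backscatter', 'm-1'),
    ('int_ctd_pressure', 'dbar'),
]

DATASETS = {
    ('CE01ISSP', 'PROFILER', 'FLORT', 'Telemetered'):
        ('CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument', FLORT_VARS),
    ('CE06ISSP', 'PROFILER', 'FLORT', 'Telemetered'):
        ('CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument', FLORT_VARS),
}

def build_var_list(platform, node, instrument_class, method):
    """Return (dataset_name, names, units, empty data arrays) for one key."""
    dataset, spec = DATASETS[(platform, node, instrument_class, method)]
    names = [n for n, _ in spec]
    units = [u for _, u in spec]
    data = [np.array([]) for _ in spec]
    return dataset, names, units, data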
outputdata.createVariable('Latitude', 'f4', ('south_north', 'east_west')) longitude[:] = lons latitude[:] = lats latitude.units = "Degrees North" longitude.units = "Degrees East" else: print("Appending to %s" % outputfile) outputdata = Dataset(outputfile, 'r+') # open up netCDF file for appending print("---------------------------") print("netCDF output file %s/wrfoutput_post_%s.nc Opened for writing/appending" % (self.namelist_dictionary['wrf_directory'], self.namelist_dictionary['wrf_pfx'])) print("Total # of WRF Files included in post process %i" % len(self.TimeIndexDict)) print("Total # of WRF time steps %i" % len(self.WRFTimes)) print("Doing 3D Surface data ") print(" -- Doing variables: ") for vv in self.namelist_dictionary['sfc_vars']: print(" -- %s" % vv) print("---------------------------") # Okay, I think we're finally ready to do this! ## tind = 0 # set inital index position for FULL data array. # Define a local list of levels to help readability for dfx, df in enumerate(self.TimeIndexDict): print("Grabbing Data from: %s" % df) ncdata = Dataset(df, 'r') # open for reading if np.shape(ncdata.variables['P'][:])[0] > 1: wrf_pressure = ( ncdata.variables['P'][slice(*self.TimeIndexDict[df]), 0, :] + ncdata.variables['PB'][slice(*self.TimeIndexDict[df]), 0, :]).squeeze() else: wrf_pressure = ( ncdata.variables['P'][:, 0, :] + ncdata.variables['PB'][:, 0, :]).squeeze() for v in self.namelist_dictionary['sfc_vars']: if v == 'Mean sea level pressure': if not all(item in ncdata.variables for item in ['PH', 'PHB', 'T', 'QVAPOR']): print("!!!WARNING!!!") print("PH,PHB,T, and QVAPOR are not in this datafile, cannot do %s" % v) continue print("Grabbing Sea Level Pressure") if dfx == 0: data_dict[v] = [np.zeros([len(self.WRFTimes)] + list(np.shape(lons)))] if np.shape(ncdata.variables['PH'][:])[0] > 1: HGT = (ncdata.variables['PH'][slice(*self.TimeIndexDict[df]), 0, :] + ncdata.variables['PHB'][slice(*self.TimeIndexDict[df]), 0, :] + ncdata.variables['PH'][slice(*self.TimeIndexDict[df]), 1, :] + ncdata.variables['PHB'][slice(*self.TimeIndexDict[df]), 1, :]) / 2. T = ncdata.variables['T'][slice(*self.TimeIndexDict[df]), 0, :] + 300. Q = ncdata.variables['QVAPOR'][slice(*self.TimeIndexDict[df]), 0, :] else: HGT = (ncdata.variables['PH'][:, 0, :] + ncdata.variables['PHB'][:, 0, :] + ncdata.variables['PH'][:, 1, :] + ncdata.variables['PHB'][:, 1, :]) / 2. T = ncdata.variables['T'][slice(*self.TimeIndexDict[df]), 0, :] + 300. Q = ncdata.variables['QVAPOR'][slice(*self.TimeIndexDict[df]), 0, :] T = helpers.Theta2TmpK(T, wrf_pressure.squeeze() / 100.) MSLP = helpers.mslp(wrf_pressure.squeeze()/ 100.,T.squeeze(), HGT.squeeze()/9.81, Q.squeeze()) data_dict[v][0][tind:MSLP.shape[0] + tind, :] = MSLP if len(data_dict[v]) == 1: data_dict[v].append(sfc_unit_dict[v]) data_dict[v].append(sfc_description_dict[v]) if v == '10 metre U wind component': if not all(item in ncdata.variables for item in ['U10', 'V10', 'COSALPHA', 'SINALPHA']): print("!!!WARNING!!!") print("U10,V10, COSALPHA,and SINALPHA, are not in thie datafile, cannot do %s" % v) continue print("Grabbing 10 meter U wind") if dfx == 0: data_dict[v] = [np.zeros([len(self.WRFTimes)] + list(np.shape(lons)))] if np.shape(ncdata.variables['U10'][:])[0] > 1: UU = ncdata.variables['U10'][slice(*self.TimeIndexDict[df]), :] if self.namelist_dictionary['scale_uv']: ## IF correcting map factor. 
VV = ncdata.variables['V10'][slice(*self.TimeIndexDict[df]), :] UU = UU * ncdata.variables['COSALPHA'][slice(*self.TimeIndexDict[df]), :] \ - VV * ncdata.variables['SINALPHA'][slice(*self.TimeIndexDict[df]), :] else: UU = ncdata.variables['U10'][:] if self.namelist_dictionary['scale_uv']: ## IF correcting map factor. VV = ncdata.variables['V10'][:] UU = UU * ncdata.variables['COSALPHA'][:] - VV * ncdata.variables['SINALPHA'][:] data_dict[v][0][tind:UU.shape[0] + tind, :] = UU if len(data_dict[v]) == 1: data_dict[v].append(sfc_unit_dict[v]) data_dict[v].append(sfc_description_dict[v]) if v == '10 metre V wind component': if not all(item in ncdata.variables for item in ['U10', 'V10', 'COSALPHA', 'SINALPHA']): print("!!!WARNING!!!") print("U10,V10, COSALPHA,and SINALPHA, are not in thie datafile, cannot do %s" % v) continue print("Grabbing 10 meter V wind") if dfx == 0: data_dict[v] = [np.zeros([len(self.WRFTimes)] + list(np.shape(lons)))] if np.shape(ncdata.variables['V10'][:])[0] > 1: VV = ncdata.variables['V10'][slice(*self.TimeIndexDict[df]), :] if self.namelist_dictionary['scale_uv']: ## IF correcting map factor. UU = ncdata.variables['U10'][slice(*self.TimeIndexDict[df]), :] VV = VV * ncdata.variables['COSALPHA'][slice(*self.TimeIndexDict[df]), :] \ + UU * ncdata.variables['SINALPHA'][slice(*self.TimeIndexDict[df]), :] else: VV = ncdata.variables['V10'][:] if self.namelist_dictionary['scale_uv']: ## IF correcting map factor. UU = ncdata.variables['U10'][:] VV = VV * ncdata.variables['COSALPHA'][:] + UU * ncdata.variables['SINALPHA'][:] data_dict[v][0][tind:VV.shape[0] + tind, :] = VV if len(data_dict[v]) == 1: data_dict[v].append(sfc_unit_dict[v]) data_dict[v].append(sfc_description_dict[v]) if v == '2 metre temperature': if not all(item in ncdata.variables for item in ['T2']): print("!!!WARNING!!!") print("T2 is not in this datafile, cannot do %s" % v) continue if dfx == 0: data_dict[v] = [np.zeros([len(self.WRFTimes)] + list(np.shape(lons)))] print("Grabbing 2 meter temperature") TT = self.standard_sfc_var('T2', ncdata, df) data_dict[v][0][tind:TT.shape[0] + tind, :] = TT if len(data_dict[v]) == 1: data_dict[v].append(sfc_unit_dict[v]) data_dict[v].append(sfc_description_dict[v]) if v == 'Soil Moisture': if not all(item in ncdata.variables for item in ['SMOIS', 'ZS']): print("!!!WARNING!!!") print("Variable SMOIS and ZS are not in this datafile, cannot do %s" % v) continue ## SOIL MOISTURE IS SPECIAL, WILL LOOP THROUGH ALL HEIGHTS AND MAKE SOIL if np.ndim(ncdata.variables['ZS'][:]) > 1: ZS = ncdata['ZS'][0, :].squeeze() * 100. ## Time / Soil Height in cm else: ZS = ncdata['ZS'][:].squeeze() * 100. if dfx == 0: ## NOW NEED TO MAKE NEW VARIABLES FOR EACH SOIL LEVEL. 
for z in ZS: vsm = '%s at %i cm below surface'%(v,z) data_dict[vsm] = [np.zeros([len(self.WRFTimes)] +list(np.shape(lons)))] print("Grabbing Soil Moisture") SM = self.standard_sfc_var('SMOIS', ncdata, df) for zdx, z in enumerate(ZS): vsm = '%s at %i cm below surface' % (v, z) data_dict[vsm][0][tind:SM.shape[0] + tind, :] = SM[:,zdx,:] if len(data_dict[vsm]) == 1: data_dict[vsm].append(sfc_unit_dict[v]) data_dict[vsm].append(sfc_description_dict[v]+ '%i cm below the surface' %z) if v == '2 metre dewpoint temperature': if not all(item in ncdata.variables for item in ['Q2', 'PSFC']): print("!!!WARNING!!!") print("Q2 and PSFC are not in this datafile, cannot do %s" % v) continue print("Grabbing 2 meter dew point temperature") if dfx == 0: data_dict[v] = [np.zeros([len(self.WRFTimes)] + list(np.shape(lons)))] if np.shape(ncdata.variables['Q2'][:])[0] > 1: Q2 = ncdata.variables['Q2'][slice(*self.TimeIndexDict[df]), :] press = ncdata.variables['PSFC'][slice(*self.TimeIndexDict[df]), :] else: Q2 = ncdata.variables['Q2'][:] press = ncdata.variables['PSFC'][:] E2 = helpers.MixR2VaporPress(Q2, press/100.) TD = helpers.DewPoint(E2) data_dict[v][0][tind:TD.shape[0] + tind, :] = TD if len(data_dict[v]) == 1: data_dict[v].append(sfc_unit_dict[v]) data_dict[v].append(sfc_description_dict[v]) if v == 'Total column water vapour': if not all(item in ncdata.variables for item in ['P', 'PB', 'PSFC', 'QVAPOR']): print("!!!WARNING!!!") print("P, PB, PSFC, and QVAPOR are not in this datafile, cannot do %s" % v) continue print("Grabbing Total column water vapor (a.k.a. precipitable water)") if dfx == 0: data_dict[v] = [np.zeros([len(self.WRFTimes)] + list(np.shape(lons)))] if np.shape(ncdata.variables['P'][:])[0] > 1: PRES1 = ncdata.variables['P'][slice(*self.TimeIndexDict[df]), :] \ + ncdata.variables['PB'][slice(*self.TimeIndexDict[df]), :] # Add base-state pressure PSFC = ncdata.variables['PSFC'][slice(*self.TimeIndexDict[df]), :] PTOTAL = np.concatenate((PSFC[:, None, :, :], PRES1), axis=1) # ADD SURFACE PRESSURE TO Pressure DP = np.diff(PTOTAL, axis=1) # GET DP, used in all calculations! QV = ncdata.variables['QVAPOR'][slice(*self.TimeIndexDict[df]), :] # WATER VAPOR MIXING RATIO! else: PRES1 = ncdata.variables['P'][:] + ncdata.variables['PB'][:] # Add base-state pressure PSFC = ncdata.variables['PSFC'][:] PTOTAL = np.concatenate((PSFC[:, None, :, :], PRES1), axis=1) # ADD SURFACE PRESSURE TO Pressure DP = np.diff(PTOTAL, axis=1) # GET DP, used in all calculations! QV = ncdata.variables['QVAPOR'][:] # WATER VAPOR MIXING RATIO! pwater = -1. / (9.81 * 1000.) * np.sum(QV * DP, axis=1) * 1000. # To get mm data_dict[v][0][tind:pwater.shape[0] + tind, :] = pwater if len(data_dict[v]) == 1: data_dict[v].append(sfc_unit_dict[v]) data_dict[v].append(sfc_description_dict[v]) if v == '1km AGL Reflectivity': prg_time = datetime.now() ## 1km above ground reflectivity.
if not all(item in ncdata.variables for item in ['PHB', 'PH', 'REFL_10CM']): print("!!!WARNING!!!") print("PHB', 'PH', 'REFL_10CM are not in this datafile" ", cannot do %s" % v) continue print("Computing Reflectivity at 1km AGL") if dfx == 0: data_dict[v] = [np.zeros([len(self.WRFTimes)] + list(np.shape(lons)))] REF=ncdata.variables['REFL_10CM'] zind = REF.dimensions.index('bottom_top') if np.shape(ncdata.variables['PHB'][:])[0] > 1: REF = ncdata.variables['REFL_10CM'][slice(*self.TimeIndexDict[df]), :, :] HGT = ncdata.variables['PH'][slice(*self.TimeIndexDict[df]), :] + \ ncdata.variables['PHB'][slice(*self.TimeIndexDict[df]), :] else: REF = ncdata.variables['REFL_10CM'][:, :, :] HGT = ncdata.variables['PH'][:, :] + \ ncdata.variables['PHB'][:, :] HGT = (HGT[:, :-1, :] + HGT[:, 1:, :]) / 2. ## Unstagger the Height variable HGT=HGT / helpers.constants['g'] ## Convert to geometric height. REF1km = helpers.z_interp(REF, HGT, lev=1000, zind=zind) data_dict[v][0][tind:REF1km.shape[0] + tind, :] = REF1km.squeeze() if len(data_dict[v]) == 1: data_dict[v].append(sfc_unit_dict[v]) data_dict[v].append(sfc_description_dict[v]) end_prg_time = datetime.now() print("Time to Interpolate 1km Reflectivity: %.2f seconds" % ( (end_prg_time - prg_time).total_seconds())) if v == 'Visibility (Surface)': if not all(item in ncdata.variables for item in ['P', 'PB', 'T', 'QVAPOR', 'QCLOUD', 'QICE', 'QRAIN', 'QSNOW']): print("!!!WARNING!!!") print("P, PB, T, QCLOUD, QRAIN, QSNOW, QICE, and and QVAPOR are not in this datafile" ", cannot do %s" % v) continue print("Computing Visibility at first model level") if dfx == 0: data_dict[v] = [np.zeros([len(self.WRFTimes)] + list(np.shape(lons)))] if np.shape(ncdata.variables['P'][:])[0] > 1: PRES = ncdata.variables['P'][slice(*self.TimeIndexDict[df]), 0, :] + ncdata.variables['PB'][ slice(*self.TimeIndexDict[df]), 0, :] # Add base-state pressure TT = ncdata.variables['T'][slice(*self.TimeIndexDict[df]), 0, :] + 300. QV = ncdata.variables['QVAPOR'][slice(*self.TimeIndexDict[df]), 0, :] QC = ncdata.variables['QCLOUD'][slice(*self.TimeIndexDict[df]), 0, :] QI = ncdata.variables['QICE'][slice(*self.TimeIndexDict[df]), 0, :] QS = ncdata.variables['QSNOW'][slice(*self.TimeIndexDict[df]), 0, :] QR = ncdata.variables['QRAIN'][slice(*self.TimeIndexDict[df]), 0, :] else: PRES = ncdata.variables['P'][:, 0, :] + ncdata.variables['PB'][:, 0, :] # Add base-state pressure TT = ncdata.variables['T'][:, 0, :] + 300. QV =
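# The pwater line in the column-water-vapour branch above discretises the
# hydrostatic column integral PW = (1/(g*rho_w)) * integral(q dp): DP from
# np.diff(PTOTAL) is negative going up, the leading minus sign flips the sum
# positive, and the final factor of 1000 converts metres of liquid water to
# millimetres. A standalone numerical check of that discretisation on a toy
# two-layer column (values here are illustrative, not from any real run):
import numpy as np

g, rho_w = 9.81, 1000.0                      # gravity (m s-2), liquid water density (kg m-3)
p = np.array([100000.0, 90000.0, 80000.0])   # Pa: surface first, decreasing upward
q = np.array([0.010, 0.008])                 # kg/kg mixing ratio on each layer
dp = np.diff(p)                              # negative, matching the WRF code above
pw_mm = -1.0 / (g * rho_w) * np.sum(q * dp) * 1000.0
# Each layer contributes q * |dp| / (g * rho_w) metres of liquid water.
print('%.2f mm' % pw_mm)   # about 18.35 mm for this toy column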
<reponame>hi117/pythonql from pythonql.algebra.operator import plan_from_list from pythonql.algebra.operators import * from pythonql.PQTuple import PQTuple from pythonql.helpers import flatten from pythonql.Rewriter import rewrite from pythonql.debug import Debug import json import types def make_pql_tuple(vals,lcs): t = [] als = [] for v in vals: t.append(eval(v[0],lcs,globals())) alias = v[1] if v[1] else v[0] als.append(alias) schema = {n:i for (i,n) in enumerate(als)} return PQTuple(t,schema) def str_dec(string): res = "" prev_slash = False for ch in string: if ch == chr(92): if not prev_slash: prev_slash = True else: res += ch prev_slash = False else: prev_slash = False res += ch return res # isList predicate for path expressions def isList(x): return (hasattr(x,'__iter__') and not hasattr(x,'keys') and not isinstance(x,str)) # isMap predicate for path expression def isMap(x): return hasattr(x,'keys') # Implement a child step on some collection or map def PQChildPath (coll,f,lcs): f = eval(str_dec(f), globals(), lcs) if f!='_' else None if isList(coll): for i in flatten(coll): if isMap(i): for j in i.keys(): if f is None: yield i[j] elif f and j==f: yield i[j] if isMap(coll): for i in coll.keys(): if f is None: yield coll[i] elif f and i==f: yield coll[i] class map_tuple: def __init__(self,key,value): self.key = key self.value = value def __repr__(self): return ("<" + repr(self.key) + ":" + repr(self.value) + ">") # Implement a descendents path on some collection or map def PQDescPath(coll,f,lcs): f = eval(f,globals(),lcs) if f!='_' else None stack = [] if isList(coll): stack = [i for i in flatten(coll)] elif isMap(coll): stack = [map_tuple(k,v) for (k,v) in coll.items()] while stack: i = stack.pop() if isinstance(i,map_tuple): if f is None: yield i.value elif f and i.key==f: yield i.value i = i.value if isList(i): it = iter(i) frst = next(it) [stack.append(j) for j in it] if isList(frst): stack.extend([ci for ci in frst]) elif isMap(frst): stack.extend([map_tuple(k,v) for (k,v) in frst.items()]) elif isMap(i): keys = list(i.keys()) [stack.append(map_tuple(j,i[j])) for j in keys] def PQTry( try_expr, except_expr, lcs): try_expr = str_dec(try_expr) except_expr = str_dec(except_expr) try: return eval(try_expr,lcs,globals()) except: return eval(except_expr,lcs,globals()) # create a table with an empty tuple def emptyTuple(schema): return PQTuple([None] * len(schema), schema) # Execute the query def PyQuery( clauses, prior_locs, prior_globs, returnType ): data = [] data.append( emptyTuple([]) ) clauses = list(clauses) clauses.reverse() plan = plan_from_list(clauses) plan = rewrite(plan, prior_locs) if Debug().print_optimized: print("Rewritten query:",plan) data = plan.execute(data, prior_locs, prior_globs) if returnType == "gen": return data elif returnType == "list": return list(data) elif returnType == "set": return set(data) else: return dict(data) # Process Select clause # We still keep that feature of generating tuples for now def processSelectClause(c, table, prior_lcs, prior_globs): # If this is a list/set comprehension: if c.expr: # Compile the expression: e = compile(c.expr.lstrip(), '<string>','eval') for t in table: lcs = dict(prior_lcs) lcs.update(t.getDict()) yield eval(e,prior_globs,lcs) else: k_expr = compile(c.key_expr.lstrip(),'<string>','eval') v_expr = compile(c.value_expr.lstrip(),'<string>','eval') for t in table: lcs = prior_lcs lcs.update(t.getDict()) k = eval(k_expr,prior_globs,lcs) v = eval(v_expr,prior_globs,lcs) yield (k,v) # Process the for clause. 
This clause creates a cartesian # product of the input table with new sequence def processForClause(c, table, prior_lcs, prior_globs): new_schema = None print(c.expr) comp_expr = compile(c.expr.lstrip(), "<string>", "eval") for t in table: if not new_schema: new_schema = dict(t.schema) for (i,v) in enumerate(c.vars): new_schema[v] = len(t.schema) + i lcs = dict(prior_lcs) lcs.update(t.getDict()) vals = eval(comp_expr, prior_globs, lcs) if len(c.vars) == 1: for v in vals: new_t_data = list(t.tuple) new_t_data.append(v) new_t = PQTuple(new_t_data, new_schema) yield new_t else: for v in vals: unpack_expr = "[ %s for %s in [ __v ]]" % ( '(' + ",".join(c.vars) + ')', c.unpack) unpacked_vals = eval(unpack_expr, prior_globs, {'__v':v}) new_t_data = list(t.tuple) for tv in unpacked_vals[0]: new_t_data.append(tv) new_t = PQTuple(new_t_data, new_schema) yield new_t # Process the let clause. Here we just add a variable to each # input tuple def processLetClause(c, table, prior_lcs, prior_globs): comp_expr = compile(c.expr.lstrip(), "<string>", "eval") new_schema = None for t in table: if not new_schema: new_schema = dict(t.schema) for (i,v) in enumerate(c.vars): new_schema[v] = len(t.schema) + i lcs = dict(prior_lcs) lcs.update(t.getDict()) v = eval(comp_expr, prior_globs, lcs) if len(c.vars) == 1: t.tuple.append(v) new_t = PQTuple( t.tuple, new_schema ) yield new_t else: unpack_expr = "[ %s for %s in [ __v ]]" % ( '(' + ",".join(c.vars) + ')', c.unpack) unpacked_vals = eval(unpack_expr, prior_globs, {'__v':v}) new_t_data = list(t.tuple) for tv in unpacked_vals[0]: new_t_data.append(tv) new_t = PQTuple(new_t_data, new_schema) yield new_t # Process a join def processJoin(c, table, prior_lcs, prior_globs, left_arg, right_arg): new_schema = None left_conds = c.left_conds right_conds = c.right_conds join_type = 'nl' dir = 'right' if c.hint: join_type = c.hint['join_type'] dir = c.hint['dir'] if dir == 'left': left_arg,right_arg = right_arg,left_arg r_init_data = [] r_init_data.append( emptyTuple([]) ) # Build an index on the right relation, if we're doing # an index join. 
index = None if join_type == 'index': index = {} r_data = r_init_data r_data = right_arg.execute(r_data, prior_lcs) for t in r_data: index_tuple = [] for rcond in right_conds: lcs = dict(prior_lcs) lcs.update(t.getDict()) rcond_val = eval(rcond, prior_globs, lcs) index_tuple.append( rcond_val ) index_tuple = tuple(index_tuple) if not index_tuple in index: index[ index_tuple ] = [] index[ index_tuple ].append( t ) # Iterate over the tuples of the left relation and # compute the tuple of condition vars table = left_arg.execute(table, prior_lcs, prior_globs) for t in table: cond_tuple = [] for lcond in left_conds: lcs = dict(prior_lcs) lcs.update(t.getDict()) lcond_val = eval(lcond, prior_globs, lcs) cond_tuple.append( lcond_val ) cond_tuple = tuple(cond_tuple) if index: if cond_tuple in index: for t2 in index[cond_tuple]: if not new_schema: new_schema = dict(t.schema) for i,_ in enumerate(t2): v = [x for x in t2.schema.items() if x[1]==i][0][0] new_schema[v] = len(new_schema) + i new_t_data = list(t.tuple) new_t_data += list(t2.tuple) new_t = PQTuple(new_t_data, new_schema) yield new_t else: continue else: r_data = r_init_data r_data = right_arg.execute(r_data, prior_lcs, prior_globs) for t2 in r_data: rcond_tuple = [] for rcond in right_conds: lcs = dict(prior_lcs) lcs.update(t2.getDict()) rcond_val = eval(rcond, prior_globs, lcs) rcond_tuple.append( rcond_val ) rcond_tuple = tuple(rcond_tuple) if cond_tuple == rcond_tuple: if not new_schema: new_schema = dict(t.schema) for i,_ in enumerate(t2): v = [x for x in t2.schema.items() if x[1]==i][0][0] new_schema[v] = len(new_schema) + i new_t_data = list(t.tuple) new_t_data += list(t2.tuple) new_t = PQTuple(new_t_data, new_schema) yield new_t # Process the match claise def processMatchClause(c, table, prior_lcs, prior_globs): clause_expr = compile(c.expr, "<string>", "eval") # Fetch and compile all expressions in the # pattern match clause e_patterns = [] patterns = list(c.pattern) while patterns: p = patterns.pop() if 'expr_cond' in p: e_patterns.append(p) if 'pattern' in p: patterns.append(p['pattern']) for ep in e_patterns: ep['expr_cond'] = compile(ep["expr_cond"], "<string>", "eval") new_schema = None for t in table: if not new_schema: new_schema = dict(t.schema) for (i,v) in enumerate(c.vars): new_schema[v] = len(t.schema) + i lcs = dict(prior_lcs) lcs.update(t.getDict()) vals = eval(clause_expr, prior_globs, lcs) for v in vals: if not hasattr(v, '__contains__'): continue new_t_data = list(t.tuple) + [None]*len(c.vars) new_t = PQTuple(new_t_data, new_schema) if match_pattern(c.pattern, c.exact, v, new_t, lcs, prior_globs): yield new_t def match_pattern(ps, isExact, v, new_t, lcs, prior_globs): all_heads = [] for p in [x for x in ps if 'match' in x]: match = p['match'][1:-1] all_heads.append(match) if match not in v: return False if 'const_cond' in p: if v[match] != p['const_cond'][1:-1]: return False if 'bind_to' in p: new_t[p['bind_to']] = v[match] lcs.update({p['bind_to']:v[match]}) if 'expr_cond' in p: val = eval(p['expr_cond'], prior_globs, lcs) if not val: return False if 'pattern' in p: if not match_pattern(p['pattern'], isExact, v[match], new_t, lcs, prior_globs): return False if isExact and any([x for x in v if x not in all_heads]): return False bind_parent = next((x for x in ps if 'bind_parent_to' in x), None) if bind_parent: new_t[bind_parent['bind_parent_to']] = v lcs.update({bind_parent['bind_parent_to']:v}) return True # Process the count clause. 
Similar to let, but simpler def processCountClause(c, table, prior_lcs, prior_globs): new_schema = None for (i,t) in enumerate(table): if not new_schema: new_schema = dict(t.schema) new_schema[c.var] = len(t.schema) new_t = PQTuple( t.tuple + [i], new_schema ) yield new_t # Process the group-by def processGroupByClause(c, table, prior_lcs, prior_globs): gby_aliases = [g if isinstance(g,str) else g[1] for g in c.groupby_list] gby_exprs = [g if isinstance(g,str) else g[0] for g in c.groupby_list] comp_exprs = [compile(e,'<string>','eval') for e in gby_exprs] grp_table = {} schema = None # Group tuples in a hashtable for t in
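# For reference, the child-step semantics implemented by PQChildPath earlier in
# this module reduce to: iterate mappings (directly, or inside a flattened
# sequence) and yield values whose key matches the filter, or all values when
# the filter is the wildcard '_'. A minimal standalone sketch of that rule;
# the child_step helper here is illustrative, not part of the module.
def child_step(coll, key=None):
    """Yield child values of a mapping, or of each mapping in a list; key=None is the wildcard."""
    items = coll if isinstance(coll, list) else [coll]
    for item in items:
        if hasattr(item, 'keys'):
            for k, v in item.items():
                if key is None or k == key:
                    yield v

data = [{'a': 1, 'b': 2}, {'a': 3}]
assert list(child_step(data, 'a')) == [1, 3]
assert sorted(child_step(data)) == [1, 2, 3]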
# Copyright (c) 2001-2008 Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.python.log}. """ import os, sys, time, logging, warnings from cStringIO import StringIO from twisted.trial import unittest from twisted.python import log, failure class FakeWarning(Warning): """ A unique L{Warning} subclass used by tests for interactions of L{twisted.python.log} with the L{warnings} module. """ class LogTest(unittest.TestCase): def setUp(self): self.catcher = [] observer = self.catcher.append log.addObserver(observer) self.addCleanup(log.removeObserver, observer) def testObservation(self): catcher = self.catcher log.msg("test", testShouldCatch=True) i = catcher.pop() self.assertEquals(i["message"][0], "test") self.assertEquals(i["testShouldCatch"], True) self.failUnless(i.has_key("time")) self.assertEquals(len(catcher), 0) def testContext(self): catcher = self.catcher log.callWithContext({"subsystem": "not the default", "subsubsystem": "a", "other": "c"}, log.callWithContext, {"subsubsystem": "b"}, log.msg, "foo", other="d") i = catcher.pop() self.assertEquals(i['subsubsystem'], 'b') self.assertEquals(i['subsystem'], 'not the default') self.assertEquals(i['other'], 'd') self.assertEquals(i['message'][0], 'foo') def testErrors(self): for e, ig in [("hello world","hello world"), (KeyError(), KeyError), (failure.Failure(RuntimeError()), RuntimeError)]: log.err(e) i = self.catcher.pop() self.assertEquals(i['isError'], 1) self.flushLoggedErrors(ig) def testErrorsWithWhy(self): for e, ig in [("hello world","hello world"), (KeyError(), KeyError), (failure.Failure(RuntimeError()), RuntimeError)]: log.err(e, 'foobar') i = self.catcher.pop() self.assertEquals(i['isError'], 1) self.assertEquals(i['why'], 'foobar') self.flushLoggedErrors(ig) def test_erroneousErrors(self): """ Exceptions raised by log observers are logged but the observer which raised the exception remains registered with the publisher. These exceptions do not prevent the event from being sent to other observers registered with the publisher. """ L1 = [] L2 = [] def broken(events): 1 / 0 for observer in [L1.append, broken, L2.append]: log.addObserver(observer) self.addCleanup(log.removeObserver, observer) for i in xrange(3): # Reset the lists for simpler comparison. L1[:] = [] L2[:] = [] # Send out the event which will break one of the observers. log.msg("Howdy, y'all.") # The broken observer should have caused this to be logged. There # is a slight bug with LogPublisher - when it logs an error from an # observer, it uses the global "err", which is not necessarily # associated with it, but which may be associated with a different # LogPublisher! See #3307. excs = self.flushLoggedErrors(ZeroDivisionError) self.assertEqual(len(excs), 1) # Both other observers should have seen the message. self.assertEquals(len(L1), 2) self.assertEquals(len(L2), 2) # The order is slightly wrong here. The first event should be # delivered to all observers; then, errors should be delivered. self.assertEquals(L1[1]['message'], ("Howdy, y'all.",)) self.assertEquals(L2[0]['message'], ("Howdy, y'all.",)) def test_showwarning(self): """ L{twisted.python.log.showwarning} emits the warning as a message to the Twisted logging system. 
""" log.showwarning( FakeWarning("unique warning message"), FakeWarning, "warning-filename.py", 27) event = self.catcher.pop() self.assertEqual( event['format'] % event, 'warning-filename.py:27: twisted.test.test_log.FakeWarning: ' 'unique warning message') self.assertEqual(self.catcher, []) # Python 2.6 requires that any function used to override the # warnings.showwarning API accept a "line" parameter or a # deprecation warning is emitted. log.showwarning( FakeWarning("unique warning message"), FakeWarning, "warning-filename.py", 27, line=object()) event = self.catcher.pop() self.assertEqual( event['format'] % event, 'warning-filename.py:27: twisted.test.test_log.FakeWarning: ' 'unique warning message') self.assertEqual(self.catcher, []) def test_warningToFile(self): """ L{twisted.python.log.showwarning} passes warnings with an explicit file target on to the underlying Python warning system. """ message = "another unique message" category = FakeWarning filename = "warning-filename.py" lineno = 31 output = StringIO() log.showwarning(message, category, filename, lineno, file=output) self.assertEqual( output.getvalue(), warnings.formatwarning(message, category, filename, lineno)) # In Python 2.6, warnings.showwarning accepts a "line" argument which # gives the source line the warning message is to include. if sys.version_info >= (2, 6): line = "hello world" output = StringIO() log.showwarning(message, category, filename, lineno, file=output, line=line) self.assertEqual( output.getvalue(), warnings.formatwarning(message, category, filename, lineno, line)) class FakeFile(list): def write(self, bytes): self.append(bytes) def flush(self): pass class EvilStr: def __str__(self): 1/0 class EvilRepr: def __str__(self): return "Happy Evil Repr" def __repr__(self): 1/0 class EvilReprStr(EvilStr, EvilRepr): pass class LogPublisherTestCaseMixin: def setUp(self): """ Add a log observer which records log events in C{self.out}. Also, make sure the default string encoding is ASCII so that L{testSingleUnicode} can test the behavior of logging unencodable unicode messages. """ self.out = FakeFile() self.lp = log.LogPublisher() self.flo = log.FileLogObserver(self.out) self.lp.addObserver(self.flo.emit) try: str(u'\N{VULGAR FRACTION ONE HALF}') except UnicodeEncodeError: # This is the behavior we want - don't change anything. self._origEncoding = None else: reload(sys) self._origEncoding = sys.getdefaultencoding() sys.setdefaultencoding('ascii') def tearDown(self): """ Verify that everything written to the fake file C{self.out} was a C{str}. Also, restore the default string encoding to its previous setting, if it was modified by L{setUp}. """ for chunk in self.out: self.failUnless(isinstance(chunk, str), "%r was not a string" % (chunk,)) if self._origEncoding is not None: sys.setdefaultencoding(self._origEncoding) del sys.setdefaultencoding class LogPublisherTestCase(LogPublisherTestCaseMixin, unittest.TestCase): def testSingleString(self): self.lp.msg("Hello, world.") self.assertEquals(len(self.out), 1) def testMultipleString(self): # Test some stupid behavior that will be deprecated real soon. # If you are reading this and trying to learn how the logging # system works, *do not use this feature*. 
self.lp.msg("Hello, ", "world.") self.assertEquals(len(self.out), 1) def testSingleUnicode(self): self.lp.msg(u"Hello, \N{VULGAR FRACTION ONE HALF} world.") self.assertEquals(len(self.out), 1) self.assertIn('with str error Traceback', self.out[0]) self.assertIn('UnicodeEncodeError', self.out[0]) class FileObserverTestCase(LogPublisherTestCaseMixin, unittest.TestCase): def test_getTimezoneOffset(self): """ Attempt to verify that L{FileLogObserver.getTimezoneOffset} returns correct values for the current C{TZ} environment setting. Do this by setting C{TZ} to various well-known values and asserting that the reported offset is correct. """ localDaylightTuple = (2006, 6, 30, 0, 0, 0, 4, 181, 1) utcDaylightTimestamp = time.mktime(localDaylightTuple) localStandardTuple = (2007, 1, 31, 0, 0, 0, 2, 31, 0) utcStandardTimestamp = time.mktime(localStandardTuple) originalTimezone = os.environ.get('TZ', None) try: # Test something west of UTC os.environ['TZ'] = 'America/New_York' time.tzset() self.assertEqual( self.flo.getTimezoneOffset(utcDaylightTimestamp), 14400) self.assertEqual( self.flo.getTimezoneOffset(utcStandardTimestamp), 18000) # Test something east of UTC os.environ['TZ'] = 'Europe/Berlin' time.tzset() self.assertEqual( self.flo.getTimezoneOffset(utcDaylightTimestamp), -7200) self.assertEqual( self.flo.getTimezoneOffset(utcStandardTimestamp), -3600) # Test a timezone that doesn't have DST os.environ['TZ'] = 'Africa/Johannesburg' time.tzset() self.assertEqual( self.flo.getTimezoneOffset(utcDaylightTimestamp), -7200) self.assertEqual( self.flo.getTimezoneOffset(utcStandardTimestamp), -7200) finally: if originalTimezone is None: del os.environ['TZ'] else: os.environ['TZ'] = originalTimezone time.tzset() if getattr(time, 'tzset', None) is None: test_getTimezoneOffset.skip = ( "Platform cannot change timezone, cannot verify correct offsets " "in well-known timezones.") def test_timeFormatting(self): """ Test the method of L{FileLogObserver} which turns a timestamp into a human-readable string. """ # There is no function in the time module which converts a UTC time # tuple to a timestamp. when = time.mktime((2001, 2, 3, 4, 5, 6, 7, 8, 0)) - time.timezone # Pretend to be in US/Eastern for a moment self.flo.getTimezoneOffset = lambda when: 18000 self.assertEquals(self.flo.formatTime(when), '2001-02-02 23:05:06-0500') # Okay now we're in Eastern Europe somewhere self.flo.getTimezoneOffset = lambda when: -3600 self.assertEquals(self.flo.formatTime(when), '2001-02-03 05:05:06+0100') # And off in the Pacific or someplace like that self.flo.getTimezoneOffset = lambda when: -39600 self.assertEquals(self.flo.formatTime(when), '2001-02-03 15:05:06+1100') # One of those weird places with a half-hour offset timezone self.flo.getTimezoneOffset = lambda when: 5400 self.assertEquals(self.flo.formatTime(when), '2001-02-03 02:35:06-0130') # Half-hour offset in the other direction self.flo.getTimezoneOffset = lambda when: -5400 self.assertEquals(self.flo.formatTime(when), '2001-02-03 05:35:06+0130') # Test an offset which is between 0 and 60 minutes to make sure the # sign comes out properly in that case. self.flo.getTimezoneOffset = lambda when: 1800 self.assertEquals(self.flo.formatTime(when), '2001-02-03 03:35:06-0030') # Test an offset between 0 and 60 minutes in the other direction. self.flo.getTimezoneOffset = lambda when: -1800 self.assertEquals(self.flo.formatTime(when), '2001-02-03 04:35:06+0030') # If a strftime-format string is present on the logger, it should # use that instead. 
Note we don't assert anything about day, hour # or minute because we cannot easily control what time.strftime() # thinks the local timezone is. self.flo.timeFormat = '%Y %m' self.assertEquals(self.flo.formatTime(when), '2001 02') def test_loggingAnObjectWithBroken__str__(self): #HELLO, MCFLY self.lp.msg(EvilStr()) self.assertEquals(len(self.out), 1) # Logging system shouldn't need to crap itself for this trivial case self.assertNotIn('UNFORMATTABLE', self.out[0]) def test_formattingAnObjectWithBroken__str__(self): self.lp.msg(format='%(blat)s', blat=EvilStr()) self.assertEquals(len(self.out), 1) self.assertIn('Invalid format string or unformattable object', self.out[0]) def test_brokenSystem__str__(self): self.lp.msg('huh', system=EvilStr()) self.assertEquals(len(self.out), 1) self.assertIn('Invalid format string or unformattable object', self.out[0]) def test_formattingAnObjectWithBroken__repr__Indirect(self): self.lp.msg(format='%(blat)s', blat=[EvilRepr()]) self.assertEquals(len(self.out), 1) self.assertIn('UNFORMATTABLE OBJECT', self.out[0]) def test_systemWithBroker__repr__Indirect(self): self.lp.msg('huh', system=[EvilRepr()]) self.assertEquals(len(self.out), 1) self.assertIn('UNFORMATTABLE OBJECT', self.out[0]) def test_simpleBrokenFormat(self): self.lp.msg(format='hooj %s %s', blat=1) self.assertEquals(len(self.out), 1) self.assertIn('Invalid format string or unformattable object', self.out[0]) def test_ridiculousFormat(self): self.lp.msg(format=42, blat=1) self.assertEquals(len(self.out), 1) self.assertIn('Invalid format string or unformattable object', self.out[0]) def test_evilFormat__repr__And__str__(self): self.lp.msg(format=EvilReprStr(), blat=1) self.assertEquals(len(self.out), 1) self.assertIn('PATHOLOGICAL', self.out[0]) def test_strangeEventDict(self): """ This kind of eventDict used to fail silently, so test it does. """ self.lp.msg(message='', isError=False) self.assertEquals(len(self.out), 0) class PythonLoggingObserverTestCase(unittest.TestCase): """ Test the bridge with python logging module. """ def setUp(self): self.out = StringIO() rootLogger = logging.getLogger("") self.originalLevel = rootLogger.getEffectiveLevel() rootLogger.setLevel(logging.DEBUG) self.hdlr = logging.StreamHandler(self.out) fmt = logging.Formatter(logging.BASIC_FORMAT) self.hdlr.setFormatter(fmt) rootLogger.addHandler(self.hdlr) self.lp = log.LogPublisher() self.obs = log.PythonLoggingObserver() self.lp.addObserver(self.obs.emit) def
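# The suite above drives twisted.python.log through its observer API:
# observers are plain callables registered with log.addObserver and fed
# event dicts by log.msg / log.err. A minimal standalone sketch of that
# flow, using only the calls the tests themselves exercise (same Python 2
# idiom as the suite):
from twisted.python import log

def demo_observer():
    events = []
    observer = events.append
    log.addObserver(observer)
    try:
        # Positional args land in event['message']; keyword args are
        # copied straight into the event dict.
        log.msg("payload", subsystem="demo")
    finally:
        log.removeObserver(observer)
    return events[0]["message"], events[0]["subsystem"]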
# All STRAIGHT_* lists share one construction: for a fixed order of five
# suits, take the nine runs of five consecutive ranks, then add the
# 10-J-Q-K-A run, which wraps around to rank index 0 (the ace).
def _straights(s0, s1, s2, s3, s4):
    runs = [{s0[i], s1[i + 1], s2[i + 2], s3[i + 3], s4[i + 4]}
            for i in range(9)]
    runs.append({s0[9], s1[10], s2[11], s3[12], s4[0]})
    return runs

STRAIGHT_CSSHC = _straights(C, S, S, H, C)
STRAIGHT_CSSHH = _straights(C, S, S, H, H)
STRAIGHT_CSSHD = _straights(C, S, S, H, D)
STRAIGHT_CSSDS = _straights(C, S, S, D, S)
STRAIGHT_CSSDC = _straights(C, S, S, D, C)
STRAIGHT_CSSDH = _straights(C, S, S, D, H)
STRAIGHT_CSSDD = _straights(C, S, S, D, D)
STRAIGHT_CSCSS = _straights(C, S, C, S, S)
STRAIGHT_CSCSC = _straights(C, S, C, S, C)
STRAIGHT_CSCSH = _straights(C, S, C, S, H)
STRAIGHT_CSCSD = _straights(C, S, C, S, D)
STRAIGHT_CSCCS = _straights(C, S, C, C, S)
STRAIGHT_CSCCC = _straights(C, S, C, C, C)
STRAIGHT_CSCCH = _straights(C, S, C, C, H)
STRAIGHT_CSCCD = _straights(C, S, C, C, D)
STRAIGHT_CSCHS = _straights(C, S, C, H, S)
STRAIGHT_CSCHC = _straights(C, S, C, H, C)
STRAIGHT_CSCHH = _straights(C, S, C, H, H)
STRAIGHT_CSCHD = _straights(C, S, C, H, D)
STRAIGHT_CSCDS = _straights(C, S, C, D, S)
STRAIGHT_CSCDC = _straights(C, S, C, D, C)
STRAIGHT_CSCDH = _straights(C, S, C, D, H)
STRAIGHT_CSCDD = _straights(C, S, C, D, D)
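# Sanity check (assumes C, S, H, D are the 13-element rank-indexed card
# lists used above): every suit pattern yields exactly ten straights,
# each containing five distinct cards.
assert len(STRAIGHT_CSCDD) == 10
assert all(len(straight) == 5 for straight in STRAIGHT_CSCDD)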
desc" CREATED_DATE_TIME = "createdDateTime" CREATED_DATE_TIME_DESC = "createdDateTime desc" CONTENT = "content" CONTENT_DESC = "content desc" CONTENT_URL = "contentUrl" CONTENT_URL_DESC = "contentUrl desc" CREATED_BY_APP_ID = "createdByAppId" CREATED_BY_APP_ID_DESC = "createdByAppId desc" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc" LEVEL = "level" LEVEL_DESC = "level desc" LINKS = "links" LINKS_DESC = "links desc" ORDER = "order" ORDER_DESC = "order desc" TITLE = "title" TITLE_DESC = "title desc" USER_TAGS = "userTags" USER_TAGS_DESC = "userTags desc" class Enum506(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CONTENT = "content" CONTENT_URL = "contentUrl" CREATED_BY_APP_ID = "createdByAppId" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LEVEL = "level" LINKS = "links" ORDER = "order" TITLE = "title" USER_TAGS = "userTags" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION = "parentSection" class Enum507(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION = "parentSection" class Enum508(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CONTENT = "content" CONTENT_URL = "contentUrl" CREATED_BY_APP_ID = "createdByAppId" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LEVEL = "level" LINKS = "links" ORDER = "order" TITLE = "title" USER_TAGS = "userTags" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION = "parentSection" class Enum509(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION = "parentSection" class Enum51(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" IS_SHARED = "isShared" LINKS = "links" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" USER_ROLE = "userRole" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum510(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" IS_SHARED = "isShared" LINKS = "links" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" USER_ROLE = "userRole" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum511(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum512(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" ID_DESC = "id desc" SELF = "self" SELF_DESC = "self desc" CREATED_DATE_TIME = "createdDateTime" CREATED_DATE_TIME_DESC = "createdDateTime desc" CREATED_BY = "createdBy" CREATED_BY_DESC = "createdBy desc" DISPLAY_NAME = "displayName" DISPLAY_NAME_DESC = "displayName desc" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_BY_DESC = "lastModifiedBy desc" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTION_GROUPS_URL_DESC = "sectionGroupsUrl desc" SECTIONS_URL = "sectionsUrl" SECTIONS_URL_DESC = "sectionsUrl desc" 
class Enum513(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum514(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum515(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum516(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum517(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" IS_SHARED = "isShared" LINKS = "links" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" USER_ROLE = "userRole" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum518(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum519(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum52(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum520(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum521(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" ID_DESC = "id desc" SELF = "self" SELF_DESC = "self desc" CREATED_DATE_TIME = "createdDateTime" CREATED_DATE_TIME_DESC = "createdDateTime desc" CREATED_BY = "createdBy" CREATED_BY_DESC = "createdBy desc" DISPLAY_NAME = "displayName" DISPLAY_NAME_DESC = "displayName desc" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_BY_DESC = "lastModifiedBy desc" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTION_GROUPS_URL_DESC = "sectionGroupsUrl desc" SECTIONS_URL = "sectionsUrl" SECTIONS_URL_DESC = "sectionsUrl desc" class 
Enum522(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum523(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum524(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum525(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum526(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" ID_DESC = "id desc" SELF = "self" SELF_DESC = "self desc" CREATED_DATE_TIME = "createdDateTime" CREATED_DATE_TIME_DESC = "createdDateTime desc" CREATED_BY = "createdBy" CREATED_BY_DESC = "createdBy desc" DISPLAY_NAME = "displayName" DISPLAY_NAME_DESC = "displayName desc" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_BY_DESC = "lastModifiedBy desc" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc" IS_DEFAULT = "isDefault" IS_DEFAULT_DESC = "isDefault desc" LINKS = "links" LINKS_DESC = "links desc" PAGES_URL = "pagesUrl" PAGES_URL_DESC = "pagesUrl desc" class Enum527(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" LINKS = "links" PAGES_URL = "pagesUrl" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum528(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum529(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" LINKS = "links" PAGES_URL = "pagesUrl" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum53(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" LINKS = "links" PAGES_URL = "pagesUrl" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class 
Enum530(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum531(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" ID_DESC = "id desc" SELF = "self" SELF_DESC = "self desc" CREATED_DATE_TIME = "createdDateTime" CREATED_DATE_TIME_DESC = "createdDateTime desc" CREATED_BY = "createdBy" CREATED_BY_DESC = "createdBy desc" DISPLAY_NAME = "displayName" DISPLAY_NAME_DESC = "displayName desc" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_BY_DESC = "lastModifiedBy desc" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" LAST_MODIFIED_DATE_TIME_DESC = "lastModifiedDateTime desc" IS_DEFAULT = "isDefault" IS_DEFAULT_DESC = "isDefault desc" LINKS = "links" LINKS_DESC = "links desc" PAGES_URL = "pagesUrl" PAGES_URL_DESC = "pagesUrl desc" class Enum532(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" LINKS = "links" PAGES_URL = "pagesUrl" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum533(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum534(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" IS_DEFAULT = "isDefault" LINKS = "links" PAGES_URL = "pagesUrl" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum535(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PAGES = "pages" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" class Enum536(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF = "self" CREATED_DATE_TIME = "createdDateTime" CREATED_BY = "createdBy" DISPLAY_NAME = "displayName" LAST_MODIFIED_BY = "lastModifiedBy" LAST_MODIFIED_DATE_TIME = "lastModifiedDateTime" SECTION_GROUPS_URL = "sectionGroupsUrl" SECTIONS_URL = "sectionsUrl" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum537(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ASTERISK = "*" PARENT_NOTEBOOK = "parentNotebook" PARENT_SECTION_GROUP = "parentSectionGroup" SECTION_GROUPS = "sectionGroups" SECTIONS = "sections" class Enum538(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): ID = "id" SELF
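# The Enum classes above all rely on a _CaseInsensitiveEnumMeta so that
# member lookup by value ignores casing. A minimal self-contained sketch
# of that idea (a stand-in, not the vendored metaclass itself; written in
# plain Python 3 syntax rather than the with_metaclass compatibility form):
from enum import Enum, EnumMeta

class _CIMeta(EnumMeta):
    def __call__(cls, value, *args, **kwargs):
        try:
            return super().__call__(value, *args, **kwargs)
        except ValueError:
            if isinstance(value, str):
                # Fall back to a case-insensitive scan of member values.
                for member in cls:
                    if member.value.lower() == value.lower():
                        return member
            raise

class OrderBy(str, Enum, metaclass=_CIMeta):
    ID = "id"
    ID_DESC = "id desc"

# OrderBy("ID") and OrderBy("Id Desc") resolve to OrderBy.ID and
# OrderBy.ID_DESC respectively.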
in test_loader: x = x.to(device) loss = model.test({"x": x}) test_loss += loss test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset) print('Test loss: {:.4f}'.format(test_loss)) return test_loss # In[12]: def plot_reconstrunction(x): with torch.no_grad(): z = p.forward(x, compute_jacobian=False) recon_batch = p.inverse(z).view(-1, 1, 28, 28) comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu() return comparison def plot_image_from_latent(z_sample): with torch.no_grad(): sample = p.inverse(z_sample).view(-1, 1, 28, 28).cpu() return sample # In[13]: # writer = SummaryWriter() z_sample = torch.randn(64, z_dim).to(device) _x, _ = iter(test_loader).next() _x = _x.to(device) for epoch in range(1, epochs + 1): train_loss = train(epoch) test_loss = test(epoch) recon = plot_reconstrunction(_x[:8]) sample = plot_image_from_latent(z_sample) # writer.add_scalar('train_loss', train_loss.item(), epoch) # writer.add_scalar('test_loss', test_loss.item(), epoch) # # writer.add_images('Image_from_latent', sample, epoch) # writer.add_images('Image_reconstrunction', recon, epoch) # # writer.close() # In[ ]: # !/usr/bin/env python # coding: utf-8 # # Variational autoencoder (using the Model class) @pytest.mark.performance def test_run_vae_model(): # In[1]: # In[2]: # root = '../data' # transform = transforms.Compose([transforms.ToTensor(), # transforms.Lambda(lambd=lambda x: x.view(-1))]) # kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True} # # train_loader = torch.utils.data.DataLoader( # datasets.MNIST(root=root, train=True, transform=transform, download=True), # shuffle=True, **kwargs) # test_loader = torch.utils.data.DataLoader( # datasets.MNIST(root=root, train=False, transform=transform), # shuffle=False, **kwargs) kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True} train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs) # In[3]: from pixyz.distributions import Normal, Bernoulli from pixyz.losses import KullbackLeibler, Expectation as E from pixyz.models import Model from pixyz.utils import print_latex # In[4]: x_dim = 784 z_dim = 64 # inference model q(z|x) class Inference(Normal): def __init__(self): super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q") self.fc1 = nn.Linear(x_dim, 512) self.fc2 = nn.Linear(512, 512) self.fc31 = nn.Linear(512, z_dim) self.fc32 = nn.Linear(512, z_dim) def forward(self, x): h = F.relu(self.fc1(x)) h = F.relu(self.fc2(h)) return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))} # generative model p(x|z) class Generator(Bernoulli): def __init__(self): super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p") self.fc1 = nn.Linear(z_dim, 512) self.fc2 = nn.Linear(512, 512) self.fc3 = nn.Linear(512, x_dim) def forward(self, z): h = F.relu(self.fc1(z)) h = F.relu(self.fc2(h)) return {"probs": torch.sigmoid(self.fc3(h))} p = Generator().to(device) q = Inference().to(device) # prior p(z) prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["z"], features_shape=[z_dim], name="p_{prior}").to(device) # In[5]: print(prior) print_latex(prior) # In[6]: print(p) print_latex(p) # In[7]: print(q) print_latex(q) # In[8]: loss = (KullbackLeibler(q, prior) - E(q, p.log_prob())).mean() print(loss) print_latex(loss) # In[9]: model = Model(loss=loss, distributions=[p, q], optimizer=optim.Adam, optimizer_params={"lr": 1e-3}) print(model) print_latex(model) # In[10]: def 
train(epoch): train_loss = 0 for x, _ in tqdm(train_loader): x = x.to(device) loss = model.train({"x": x}) train_loss += loss train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset) print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss)) return train_loss # In[11]: def test(epoch): test_loss = 0 for x, _ in test_loader: x = x.to(device) loss = model.test({"x": x}) test_loss += loss test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset) print('Test loss: {:.4f}'.format(test_loss)) return test_loss # In[12]: def plot_reconstrunction(x): with torch.no_grad(): z = q.sample({"x": x}, return_all=False) recon_batch = p.sample_mean(z).view(-1, 1, 28, 28) comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu() return comparison def plot_image_from_latent(z_sample): with torch.no_grad(): sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu() return sample # In[13]: # writer = SummaryWriter('/runs/vae_model') z_sample = 0.5 * torch.randn(64, z_dim).to(device) _x, _ = iter(test_loader).next() _x = _x.to(device) for epoch in range(1, epochs + 1): train_loss = train(epoch) test_loss = test(epoch) recon = plot_reconstrunction(_x[:8]) sample = plot_image_from_latent(z_sample) # writer.add_scalar('train_loss', train_loss.item(), epoch) # writer.add_scalar('test_loss', test_loss.item(), epoch) # # writer.add_images('Image_from_latent', sample, epoch) # writer.add_images('Image_reconstrunction', recon, epoch) # # writer.close() # In[ ]: # In[ ]: # In[ ]: # !/usr/bin/env python # coding: utf-8 # # Variational autoencoder (using the VAE class) @pytest.mark.performance def test_run_vae_with_vae_class(): # * Original paper: Auto-Encoding Variational Bayes (https://arxiv.org/pdf/1312.6114.pdf) # In[1]: # In[2]: # MNIST # root = '../data' # transform = transforms.Compose([transforms.ToTensor(), # transforms.Lambda(lambd=lambda x: x.view(-1))]) # kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True} # # train_loader = torch.utils.data.DataLoader( # datasets.MNIST(root=root, train=True, transform=transform, download=True), # shuffle=True, **kwargs) # test_loader = torch.utils.data.DataLoader( # datasets.MNIST(root=root, train=False, transform=transform), # shuffle=False, **kwargs) kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True} train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs) # In[3]: from pixyz.utils import print_latex # ## Define probability distributions # Prior: $p(z) = \cal N(z; \mu=0, \sigma^2=1)$ # Generator: $p_{\theta}(x|z) = \cal B(x; \lambda = g(z))$ # Inference: $q_{\phi}(z|x) = \cal N(z; \mu=f_\mu(x), \sigma^2=f_{\sigma^2}(x))$ # In[4]: from pixyz.distributions import Normal, Bernoulli x_dim = 784 z_dim = 64 # inference model q(z|x) class Inference(Normal): """ parameterizes q(z | x) infered z follows a Gaussian distribution with mean 'loc', variance 'scale' z ~ N(loc, scale) """ def __init__(self): super(Inference, self).__init__(cond_var=["x"], var=["z"], name="q") self.fc1 = nn.Linear(x_dim, 512) self.fc2 = nn.Linear(512, 512) self.fc31 = nn.Linear(512, z_dim) self.fc32 = nn.Linear(512, z_dim) def forward(self, x): """ given the observation x, return the mean and variance of the Gaussian distritbution """ h = F.relu(self.fc1(x)) h = F.relu(self.fc2(h)) return {"loc": self.fc31(h), "scale": F.softplus(self.fc32(h))} # generative model p(x|z) class Generator(Bernoulli): """ 
parameterizes the bernoulli(for MNIST) observation likelihood p(x | z) """ def __init__(self): super(Generator, self).__init__(cond_var=["z"], var=["x"], name="p") self.fc1 = nn.Linear(z_dim, 512) self.fc2 = nn.Linear(512, 512) self.fc3 = nn.Linear(512, x_dim) def forward(self, z): """ given the latent variable z, return the probability of Bernoulli distribution """ h = F.relu(self.fc1(z)) h = F.relu(self.fc2(h)) return {"probs": torch.sigmoid(self.fc3(h))} p = Generator().to(device) q = Inference().to(device) # prior p(z) # z ~ N(0, 1) prior = Normal(loc=torch.tensor(0.), scale=torch.tensor(1.), var=["z"], features_shape=[z_dim], name="p_{prior}").to(device) # In[5]: print(prior) print_latex(prior) # In[6]: print(p) print_latex(p) # In[7]: print(q) print_latex(q) # ## Define VAE model using VAE Model Class # - https://docs.pixyz.io/en/latest/models.html#vae # In[8]: from pixyz.losses import KullbackLeibler # define additional loss terms for regularizing representation of latent variables kl = KullbackLeibler(q, prior) print_latex(kl) # In[9]: from pixyz.models import VAE model = VAE(encoder=q, decoder=p, regularizer=kl, optimizer=optim.Adam, optimizer_params={"lr": 1e-3}) print(model) print_latex(model) # ## Define Train and Test loop using model # In[10]: def train(epoch): train_loss = 0 for x, _ in tqdm(train_loader): x = x.to(device) loss = model.train({"x": x}) train_loss += loss train_loss = train_loss * train_loader.batch_size / len(train_loader.dataset) print('Epoch: {} Train loss: {:.4f}'.format(epoch, train_loss)) return train_loss # In[11]: def test(epoch): test_loss = 0 for x, _ in test_loader: x = x.to(device) loss = model.test({"x": x}) test_loss += loss test_loss = test_loss * test_loader.batch_size / len(test_loader.dataset) print('Test loss: {:.4f}'.format(test_loss)) return test_loss # ## Reconstruct image and generate image # In[12]: def plot_reconstrunction(x): """ reconstruct image given input observation x """ with torch.no_grad(): # infer and sampling z using inference model q `.sample()` method z = q.sample({"x": x}, return_all=False) # reconstruct image from inferred latent variable z using Generator model p `.sample_mean()` method recon_batch = p.sample_mean(z).view(-1, 1, 28, 28) # concatenate original image and reconstructed image for comparison comparison = torch.cat([x.view(-1, 1, 28, 28), recon_batch]).cpu() return comparison def plot_image_from_latent(z_sample): """ generate new image given latent variable z """ with torch.no_grad(): # generate image from latent variable z using Generator model p `.sample_mean()` method sample = p.sample_mean({"z": z_sample}).view(-1, 1, 28, 28).cpu() return sample # In[13]: # for visualising in TensorBoard # writer = SummaryWriter() # fix latent variable z for watching generative model improvement z_sample = 0.5 * torch.randn(64, z_dim).to(device) # set-aside observation for watching generative model improvement _x, _ = iter(test_loader).next() _x = _x.to(device) for epoch in range(1, epochs + 1): train_loss = train(epoch) test_loss = test(epoch) recon = plot_reconstrunction(_x[:8]) sample = plot_image_from_latent(z_sample) # writer.add_scalar('train_loss', train_loss.item(), epoch) # writer.add_scalar('test_loss', test_loss.item(), epoch) # # writer.add_images('Image_from_latent', sample, epoch) # writer.add_images('Image_reconstrunction', recon, epoch) # # writer.close() # !/usr/bin/env python # coding: utf-8 # # Variational autoencoder @pytest.mark.performance def test_run_vae(): # * Original paper: Auto-Encoding 
Variational Bayes (https://arxiv.org/pdf/1312.6114.pdf) # In[1]: # In[2]: # MNIST # root = '../data' # transform = transforms.Compose([transforms.ToTensor(), # transforms.Lambda(lambd=lambda x: x.view(-1))]) # kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True} # # train_loader = torch.utils.data.DataLoader( # datasets.MNIST(root=root, train=True, transform=transform, download=True), # shuffle=True, **kwargs) # test_loader = torch.utils.data.DataLoader( # datasets.MNIST(root=root, train=False, transform=transform), # shuffle=False, **kwargs) kwargs = {'batch_size': batch_size, 'num_workers': 1, 'pin_memory': True} train_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=True, **kwargs) test_loader = torch.utils.data.DataLoader(mock_mnist, shuffle=False, **kwargs) # In[3]: from pixyz.utils import print_latex # ## Define
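# The pixyz objective above, (KullbackLeibler(q, prior) - E(q, p.log_prob())).mean(),
# is the negative ELBO. A minimal plain-PyTorch sketch of the same quantity
# for a diagonal-Gaussian encoder and Bernoulli decoder; `encoder` and
# `decoder` are hypothetical callables returning (loc, scale) and pixel
# probabilities, standing in for the Inference/Generator networks above.
import torch
import torch.nn.functional as F

def neg_elbo(x, encoder, decoder):
    loc, scale = encoder(x)
    # Reparameterized sample z ~ N(loc, scale^2).
    z = loc + scale * torch.randn_like(scale)
    probs = decoder(z)
    # E_q[log p(x|z)] for a Bernoulli decoder is the negative BCE.
    recon = -F.binary_cross_entropy(probs, x, reduction="none").sum(-1)
    # Closed-form KL(N(loc, scale^2) || N(0, 1)), summed over z dims.
    kl = 0.5 * (loc.pow(2) + scale.pow(2) - 2 * torch.log(scale) - 1).sum(-1)
    return (kl - recon).mean()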
import unittest
import tempfile
import os
import string
import cPickle
import codecs

try:
    import maya.standalone
    maya.standalone.initialize()
except:
    pass

import maya.api.OpenMaya as om
import maya.cmds as mc

from mpylib import MNode
from mpylib.nodes import MPyNode


class TestMPyNode(unittest.TestCase):

    TEST_CLASS = MPyNode

    def setUp(self):
        mc.file(newFile=True, force=True)

    def testInit(self):
        node = self.TEST_CLASS()
        name = self.TEST_CLASS.__name__[0].lower() + self.TEST_CLASS.__name__[1:] + "1"
        self.assertEqual(node.getName(), name)
        same_node = self.TEST_CLASS(node)
        self.assertEqual(same_node, node)
        new_node = self.TEST_CLASS(name="test_py_node")
        self.assertEqual(new_node.getName(), "test_py_node")

    def testDelete(self):
        self.testInit()
        node = self.TEST_CLASS("test_py_node")
        node.delete()

    def testLs(self):
        new_nodes = []
        for i in xrange(10):
            new_nodes.append(self.TEST_CLASS(name="new_node_" + str(i)))
        results = self.TEST_CLASS.ls()
        self.assertEqual(len(results), 10)
        for node in results:
            self.assertTrue(node in results)
            self.assertEqual(type(node), self.TEST_CLASS)
        new_nodes = []
        for i in xrange(10):
            new_nodes.append(TestChildClass(name="new_child_" + str(i)))
        results = TestChildClass.ls()
        self.assertEqual(len(results), 10)
        for node in results:
            self.assertTrue(node in results)
            self.assertEqual(type(node), TestChildClass)

    def testExportToFile(self):
        for use_binary in (True, False):
            mc.file(newFile=True, force=True)
            file_ext = MPyNode.BINARY_FILE_EXT if use_binary else MPyNode.ASCII_FILE_EXT
            self.testInit()
            node = self.TEST_CLASS("test_py_node")
            temp_file = os.environ["TEMP"] + "\\testExportToFile." + file_ext
            if os.path.exists(temp_file):
                os.remove(temp_file)
            self.assertFalse(os.path.exists(temp_file))
            node.exportToFile(temp_file, use_binary=use_binary)
            self.assertTrue(os.path.exists(temp_file))
            mc.file(newFile=True, force=True)
            fh = open(temp_file)
            try:
                node = cPickle.load(fh)
            except Exception, err:
                fh.close()
                raise
            else:
                fh.close()
            self.assertTrue(node.isValid())
            os.remove(temp_file)

    def testImportFromFile(self):
        for use_binary in (True, False):
            mc.file(newFile=True, force=True)
            file_ext = MPyNode.BINARY_FILE_EXT if use_binary else MPyNode.ASCII_FILE_EXT
            self.testInit()
            node = self.TEST_CLASS("test_py_node")
            temp_file = os.environ["TEMP"] + "\\testExportToFile."
+ file_ext if os.path.exists(temp_file): os.remove(temp_file) self.assertFalse(os.path.exists(temp_file)) node.exportToFile(temp_file, use_binary=use_binary) self.assertTrue(os.path.exists(temp_file)) mc.file(newFile=True, force=True) node = MPyNode.importFromFile(temp_file) self.assertTrue(node.isValid()) os.remove(temp_file) def testSetExpression(self): node = self.TEST_CLASS() exp_str = "i = 1 + 1" node.setExpression(exp_str) self.assertEqual(node.getExpression(), exp_str) exp_str = "self.test_init = 0.0" node.setExpression(exp_str) def testGetExpression(self): self.testSetExpression() def testAddInputAttr(self): node = self.TEST_CLASS(name="test_node") data_check_dict = {} i = 1 for is_array in (False, True): for attr_type in MPyNode._NEW_INPUT_TYPES.keys(): name_suffix = "" if not is_array else "_array" attr_name = "test_" + attr_type + name_suffix node.addInputAttr(attr_name, attr_type, is_array=is_array) self.assertTrue(node.hasAttr(attr_name)) if is_array: self.assertEqual(node.getAttr(attr_name, size=True), 0) else: self.assertEqual(node.getAttr(attr_name, size=True), 1) ##---test internal data---## data_check_dict[attr_name] = [attr_type] internal_str = node._getInternalInputString() self.assertTrue(internal_str is not None) interal_dict = cPickle.loads(codecs.decode(internal_str.encode(), "base64")) self.assertEqual(type(interal_dict), dict) self.assertEqual(len(interal_dict), i) i += 1 for internal_attr, internal_type in interal_dict.items(): self.assertSequenceEqual(interal_dict[internal_attr], data_check_dict[internal_attr]) return data_check_dict def testRenameInputAttr(self): return self._testRenameAttr() def testRenameOutputAttr(self): return self._testRenameAttr(is_input=False) def _testRenameAttr(self, is_input=True): node = self.TEST_CLASS(name="test_node") data_check_dict = {} i = 1 in_node = self.TEST_CLASS(name="in_node") for is_array in (False, True): attr_types = MPyNode._NEW_INPUT_TYPES.keys() if is_input else MPyNode._NEW_OUTPUT_TYPES.keys() for attr_type in attr_types: name_suffix = "" if not is_array else "_array" attr_name = "test_" + attr_type + name_suffix attr_renamed = "testRenamed_" + attr_type + name_suffix add_func = getattr(node, "addInputAttr") if is_input else getattr(node, "addOutputAttr") rename_func = getattr(node, "renameInputAttr") if is_input else getattr(node, "renameOutputAttr") add_func(attr_name, attr_type, is_array=is_array) rename_func(attr_name, attr_renamed) self.assertFalse(node.hasAttr(attr_name)) self.assertTrue(node.hasAttr(attr_renamed)) if is_array: self.assertEqual(node.getAttr(attr_renamed, size=True), 0) else: self.assertEqual(node.getAttr(attr_renamed, size=True), 1) ##---test internal data---## data_check_dict[attr_renamed] = [attr_type] internal_str = node._getInternalInputString() if is_input else node._getInternalOutputString() self.assertTrue(internal_str is not None) interal_dict = cPickle.loads(codecs.decode(internal_str.encode(), "base64")) self.assertEqual(type(interal_dict), dict) self.assertEqual(len(interal_dict), i) self.assertFalse(interal_dict.has_key(attr_name)) i += 1 for internal_attr, internal_type in interal_dict.items(): self.assertSequenceEqual(interal_dict[internal_attr], data_check_dict[internal_attr]) return data_check_dict def testDeleteInputAttr(self): data_check_dict = self.testAddInputAttr() i = len(data_check_dict) node = self.TEST_CLASS("test_node") for is_array in (False, True): for attr_type in MPyNode._NEW_INPUT_TYPES.keys(): name_suffix = "" if not is_array else "_array" attr_name = "test_" + 
attr_type + name_suffix self.assertTrue(node.hasAttr(attr_name)) node.deleteInputAttr(attr_name) self.assertFalse(node.hasAttr(attr_name)) ##---test internal data---## internal_str = node._getInternalInputString() del(data_check_dict[attr_name]) self.assertTrue(internal_str is not None) interal_dict = cPickle.loads(codecs.decode(internal_str.encode(), "base64")) i -= 1 if i > 0: self.assertEqual(type(interal_dict), dict) self.assertEqual(len(interal_dict), i) for internal_attr, internal_type in interal_dict.items(): self.assertSequenceEqual(interal_dict[internal_attr], data_check_dict[internal_attr]) else: self.assertTrue(interal_dict is None) def testDeleteOutputAttr(self): data_check_dict = self.testAddOutputAttr() i = len(data_check_dict) node = self.TEST_CLASS("test_node") for is_array in (False, True): for attr_type in MPyNode._NEW_OUTPUT_TYPES.keys(): name_suffix = "" if not is_array else "_array" attr_name = "test_" + attr_type + name_suffix self.assertTrue(node.hasAttr(attr_name)) node.deleteOutputAttr(attr_name) self.assertFalse(node.hasAttr(attr_name)) ##---test internal data---## internal_str = node._getInternalOutputString() del(data_check_dict[attr_name]) self.assertTrue(internal_str is not None) interal_dict = cPickle.loads(codecs.decode(internal_str.encode(), "base64")) i -= 1 if i > 0: self.assertEqual(type(interal_dict), dict) self.assertEqual(len(interal_dict), i) for internal_attr, internal_type in interal_dict.items(): self.assertSequenceEqual(interal_dict[internal_attr], data_check_dict[internal_attr]) else: self.assertTrue(interal_dict is None) def testAddOutputAttr(self): node = self.TEST_CLASS(name="test_node") data_check_dict = {} i = 1 for is_array in (False, True): for attr_type in MPyNode._NEW_OUTPUT_TYPES.keys(): name_suffix = "" if not is_array else "_array" attr_name = "test_" + attr_type + name_suffix node.addOutputAttr(attr_name, attr_type, is_array=is_array) self.assertTrue(node.hasAttr(attr_name)) if is_array: self.assertEqual(node.getAttr(attr_name, size=True), 0) else: self.assertEqual(node.getAttr(attr_name, size=True), 1) ##---test internal data---## data_check_dict[attr_name] = [attr_type] internal_str = node._getInternalOutputString() self.assertTrue(internal_str is not None) interal_dict = cPickle.loads(codecs.decode(internal_str.encode(), "base64")) self.assertEqual(type(interal_dict), dict) self.assertEqual(len(interal_dict), i) i += 1 for internal_attr, internal_type in interal_dict.items(): self.assertSequenceEqual(interal_dict[internal_attr], data_check_dict[internal_attr]) return data_check_dict def testListInputAttrs(self): self.testAddInputAttr() node = self.TEST_CLASS("test_node") attr_list = [] for is_array in (False, True): for attr_type in MPyNode._NEW_INPUT_TYPES.keys(): name_suffix = "" if not is_array else "_array" attr_name = "test_" + attr_type + name_suffix attr_list.append(attr_name) results = node.listInputAttrs() for attr in results: self.assertTrue(attr in attr_list) def testAddStoredVariable(self): self.testInit() node = self.TEST_CLASS("test_py_node") for var_name in string.ascii_lowercase: node.addStoredVariable(var_name) def testListStoredVariables(self): self.testAddStoredVariable() node = self.TEST_CLASS("test_py_node") stored_vars = node.listStoredVariables() for var_name, letter in map(None, stored_vars, string.ascii_lowercase): self.assertEqual(var_name, letter) def testHasStoredVariable(self): self.testAddStoredVariable() node = self.TEST_CLASS("test_py_node") self.assertFalse(node.hasStoredVariable("foo")) for 
var_name in string.ascii_lowercase: self.assertTrue(node.hasStoredVariable(var_name)) def testSetStoredVariable(self): self.testInit() node = self.TEST_CLASS("test_py_node") for i, var_name in enumerate(string.ascii_lowercase): node.setStoredVariable(var_name, {var_name:{unicode(var_name):1}}) def testGetStoredVariables(self): self.testSetStoredVariable() node = self.TEST_CLASS("test_py_node") var_map = node.getStoredVariables() for i, var_name in enumerate(string.ascii_lowercase): self.assertTrue(var_name in var_map) self.assertEqual(type(var_map[var_name]), dict) self.assertEqual(var_map[var_name][var_name], {unicode(var_name):1}) def testRemoveStoredVariable(self): self.testAddStoredVariable() node = self.TEST_CLASS("test_py_node") removed_var = "f" var_names = node.listStoredVariables() self.assertTrue(removed_var in var_names) node.removeStoredVariable(removed_var) var_names = node.listStoredVariables() self.assertFalse(removed_var in var_names) def testFunctionalStoredVariables(self): in_node = MNode.createNode("transform", name="in_node") out_node = MNode.createNode("transform", name="out_node") node = self.TEST_CLASS(name="test_py_node") node.addStoredVariable("testVar") node.addInputAttr("inAttr", "float", keyable=True, defaultValue=-1.0) node.addOutputAttr("outAttr", "float") node.setExpression("if not hasattr(self, 'testVar'):\n\tself.testVar = 1.0\nelse:\n\tself.testVar += 1.0\noutAttr = self.testVar\n") in_node.connectAttr("translateX", node, "inAttr") node.connectAttr("outAttr", out_node, "translateX") in_node.setAttr("translateX", 1.0) out_value = out_node.getAttr("translateX") self.assertEqual(out_value, 1.0) out_file_name = os.environ["TEMP"].replace("\\", "/") + "/test_mpynode.ma" mc.file(rename=out_file_name) mc.file(save=True, force=True, type="mayaAscii") for i in range(2, 100): mc.file(out_file_name, open=True, force=True) in_node = MNode("in_node") out_node = MNode("out_node") in_node.setAttr("translateX", float(i) + 1.0) out_value = out_node.getAttr("translateX") self.assertEqual(out_value, float(i)) mc.file(save=True, force=True, type="mayaAscii") def testListOutputAttrs(self): self.testAddOutputAttr() node = self.TEST_CLASS("test_node") attr_list = [] for is_array in (False, True): for attr_type in MPyNode._NEW_OUTPUT_TYPES.keys(): name_suffix = "" if not is_array else "_array" attr_name = "test_" + attr_type + name_suffix attr_list.append(attr_name) results = node.listOutputAttrs() for attr in results: self.assertTrue(attr in attr_list) def testGetInputAttrMap(self): self.testAddInputAttr() node = self.TEST_CLASS("test_node") attr_map = node.getInputAttrMap() for attr_name, attr_data in attr_map.items(): attr_tokens = attr_name.split("_") self.assertTrue(len(attr_tokens) in (2, 3)) self.assertEqual(attr_tokens[1], attr_data[MPyNode._ATTR_MAP_TYPE_KEY]) if len(attr_tokens) == 2: self.assertFalse(attr_data.has_key(MPyNode.ATTR_MAP_ARRAY_KEY)) else: self.assertTrue(attr_data[MPyNode.ATTR_MAP_ARRAY_KEY]) def testGetOutputAttrMap(self): self.testAddOutputAttr() node = self.TEST_CLASS("test_node") attr_map = node.getOutputAttrMap() for attr_name, attr_data in attr_map.items(): attr_tokens = attr_name.split("_") self.assertTrue(len(attr_tokens) in (2, 3)) self.assertEqual(attr_tokens[1], attr_data[MPyNode._ATTR_MAP_TYPE_KEY]) if len(attr_tokens) == 2: self.assertFalse(attr_data.has_key(MPyNode.ATTR_MAP_ARRAY_KEY)) else: self.assertTrue(attr_data[MPyNode.ATTR_MAP_ARRAY_KEY]) def testListValidInputTypes(self): attr_types = MPyNode.listValidInputTypes() 
self.assertEqual(type(attr_types), tuple) self.assertEqual(len(attr_types), len(MPyNode._NEW_INPUT_TYPES)) attr_keys = MPyNode._NEW_INPUT_TYPES.keys() attr_keys.sort() self.assertSequenceEqual(attr_types, attr_keys) def testListValidOutputTypes(self): attr_types = MPyNode.listValidOutputTypes() self.assertEqual(type(attr_types), tuple) self.assertEqual(len(attr_types), len(MPyNode._NEW_OUTPUT_TYPES)) attr_keys = MPyNode._NEW_OUTPUT_TYPES.keys() attr_keys.sort() self.assertSequenceEqual(attr_types, attr_keys) def testFunctionalFloat(self): for is_array in (False, True): mc.file(newFile=True, force=True) mc.playbackOptions(e=True, minTime=1, maxTime=100) in_node = MNode(mc.spaceLocator(name="in_node")[0]) out_node = MNode(mc.spaceLocator(name="out_node")[0]) attr_type = "float" input_attr = "translateX" output_attr = "translateX" output_attr_2 = "translateY" anim_values = (1.0, 100.0) self._runFunctionalTest(in_node, out_node, attr_type, input_attr, output_attr, input_attr, anim_values, is_array, output_attr_2=output_attr_2) def testFunctionalEnum(self): for is_array in (False, True): mc.file(newFile=True, force=True) mc.playbackOptions(e=True, minTime=1, maxTime=100) in_node = MNode(mc.spaceLocator(name="in_node")[0]) out_node = MNode(mc.spaceLocator(name="out_node")[0]) attr_type = "enum" input_attr = "testEnum" output_attr = "testEnum" output_attr_2 = "testEnum2" enum_names = ":".join(["enum" + str(i) for i in xrange(100)]) attr_kargs={"enumName":enum_names} anim_values = (1, 100) self._runFunctionalTest(in_node, out_node, attr_type, input_attr, output_attr, input_attr, anim_values, is_array, output_attr_2=output_attr_2, attr_kargs=attr_kargs) def testFunctionalInt(self): for is_array in (False, True): mc.file(newFile=True, force=True) mc.playbackOptions(e=True, minTime=1, maxTime=100) in_node = MNode(mc.spaceLocator(name="in_node")[0]) out_node = MNode(mc.spaceLocator(name="out_node")[0]) attr_type = "int" input_attr = "testInt" output_attr = "testInt" output_attr_2 = "testInt2" anim_values = (1, 100) self._runFunctionalTest(in_node, out_node, attr_type, input_attr, output_attr, input_attr, anim_values, is_array, output_attr_2=output_attr_2) def testFunctionalBool(self): for is_array in (False, True): mc.file(newFile=True, force=True) mc.playbackOptions(e=True, minTime=1, maxTime=100) in_node = MNode(mc.spaceLocator(name="in_node")[0]) out_node = MNode(mc.spaceLocator(name="out_node")[0]) attr_type = "bool" input_attr = "testBool" output_attr = "testBool" out_attr_2 = "testBool2" anim_values = (True, False) self._runFunctionalTest(in_node, out_node, attr_type, input_attr, output_attr, input_attr, anim_values, is_array, output_attr_2=out_attr_2) def testFunctionalVector(self): for is_array in (False, True): for use_comp_names in (False, True): mc.file(newFile=True, force=True) mc.playbackOptions(e=True, minTime=1, maxTime=100) in_node = MNode(mc.spaceLocator(name="in_node")[0]) out_node = MNode(mc.spaceLocator(name="out_node")[0]) attr_type = "vector" input_attr = "translate" output_attr = "scale" output_attr_2 = "translate" anim_values = ((1.0, 1.0, 1.0), (100.0, 200.0, 300.0)) self._runFunctionalTest(in_node, out_node, attr_type, input_attr, output_attr, input_attr, anim_values, is_array, output_attr_2=output_attr_2, use_comp_names=use_comp_names) def testFunctionalMatrix(self): for is_array in (False, True): mc.file(newFile=True, force=True) mc.playbackOptions(e=True, minTime=1, maxTime=100) in_node = MNode(mc.spaceLocator(name="in_node")[0]) out_node
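# The functional tests above all follow one pattern: drive an input
# attribute, let the node's expression run, and assert on the connected
# output. A minimal sketch of that round-trip in plain maya.cmds, with a
# direct connection standing in for the MPyNode expression under test
# (node names here are hypothetical):
import maya.cmds as mc

def demo_round_trip():
    src = mc.spaceLocator(name="demo_src")[0]
    dst = mc.spaceLocator(name="demo_dst")[0]
    mc.connectAttr(src + ".translateX", dst + ".translateY")
    mc.setAttr(src + ".translateX", 5.0)
    assert mc.getAttr(dst + ".translateY") == 5.0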
"""PyTest test runner."""

import collections
import inspect
import os
import re
import traceback

import pytest
import six
from schema import Or

from testplan.testing import base as testing
from testplan.common.config import ConfigOption
from testplan.testing.base import TestResult
from testplan.testing.multitest.entries import assertions
from testplan.testing.multitest.entries.base import Log as LogAssertion
from testplan.testing.multitest.result import Result as MultiTestResult
from testplan.testing.multitest.entries.schemas.base import (
    registry as schema_registry,
)
from testplan.testing.multitest.entries.stdout.base import (
    registry as stdout_registry,
)
from testplan.report import TestGroupReport, TestCaseReport, Status
from testplan.common.utils import validation

# Regex for parsing suite and case name and case parameters
_CASE_REGEX = re.compile(
    r"^(?P<suite_name>.+)::"
    r"(?P<case_name>[^\[]+)(?:\[(?P<case_params>.+)\])?$",
    re.DOTALL,
)


class PyTestConfig(testing.TestConfig):
    """
    Configuration object for
    :py:class:`~testplan.testing.py_test.PyTest` test runner.
    """

    @classmethod
    def get_options(cls):
        return {
            "target": Or(str, [str]),
            ConfigOption("select", default=""): str,
            ConfigOption("extra_args", default=None): Or([str], None),
            ConfigOption(
                "result", default=MultiTestResult
            ): validation.is_subclass(MultiTestResult),
        }


class PyTest(testing.Test):
    """
    PyTest plugin for Testplan. Allows tests written for PyTest to be run
    from Testplan, with the test results logged and included in the Testplan
    report.

    :param name: Test instance name. Also used as uid.
    :type name: ``str``
    :param target: Target of PyTest configuration.
    :type target: ``str`` or ``list`` of ``str``
    :param description: Description of test instance.
    :type description: ``str``
    :param select: Selection of PyTest configuration.
    :type select: ``str``
    :param extra_args: Extra arguments passed to pytest.
    :type extra_args: ``NoneType`` or ``list`` of ``str``
    :param result: Result that contains assertion entries.
    :type result: :py:class:`~testplan.testing.multitest.result.Result`

    Also inherits all :py:class:`~testplan.testing.base.Test` options.
    """

    CONFIG = PyTestConfig

    def __init__(
        self,
        name,
        target,
        description=None,
        select="",
        extra_args=None,
        result=MultiTestResult,
        **options
    ):
        options.update(self.filter_locals(locals()))
        super(PyTest, self).__init__(**options)

        # Initialise a separate plugin object to pass to PyTest. This avoids
        # namespace clashes with the PyTest object, since PyTest will scan for
        # methods that look like hooks in the plugin.
        quiet = not self._debug_logging_enabled
        self._pytest_plugin = _ReportPlugin(self, self.report, quiet)
        self._collect_plugin = _CollectPlugin(quiet)

        self._pytest_args = self._build_pytest_args()

        # Map from testsuite/testcase name to nodeid. Filled out after
        # tests are collected via dry_run().
self._nodeids = None def main_batch_steps(self): """Specify the test steps: run the tests, then log the results.""" self._add_step(self.run_tests) self._add_step(self.log_test_results, top_down=False) def setup(self): """Setup the PyTest plugin for the suite.""" self._pytest_plugin.setup() def run_tests(self): """Run pytest and wait for it to terminate.""" # Execute pytest with self as a plugin for hook support return_code = pytest.main( self._pytest_args, plugins=[self._pytest_plugin] ) if return_code == 5: self.result.report.status_override = Status.UNSTABLE self.logger.warning("No tests were run") elif return_code != 0: self.result.report.status_override = Status.FAILED self.logger.error("pytest exited with return code %d", return_code) def _collect_tests(self): """Collect test items but do not run any.""" return_code = pytest.main( self._pytest_args + ["--collect-only"], plugins=[self._collect_plugin], ) if return_code not in (0, 5): # rc 5: no tests were run raise RuntimeError( "Collection failure, exit code = {}".format(return_code) ) return self._collect_plugin.collected def get_test_context(self): """ Inspect the test suites and cases by running PyTest with the --collect-only flag and passing in our collection plugin. :return: List containing pairs of suite name and testcase names. :rtype: List[Tuple[str, List[str]]] """ try: collected = self._collect_tests() except RuntimeError: self.result.report.status_override = Status.ERROR self.logger.exception("Failed to collect tests.") return [] # The plugin will handle converting PyTest tests into suites and # testcase names. suites = collections.defaultdict(set) for item in collected: suite_name, case_name, _ = _case_parse(item.nodeid) suites[suite_name].add(case_name) return [ (suite, list(testcases)) for suite, testcases in suites.items() ] def dry_run(self): """ Collect tests and build a report tree skeleton, but do not run any tests. """ collected = self._collect_tests() test_report = self._new_test_report() self._nodeids = { "testsuites": {}, "testcases": collections.defaultdict(dict), } for item in collected: _add_empty_testcase_report(item, test_report, self._nodeids) result = TestResult() result.report = test_report return result def run_testcases_iter(self, testsuite_pattern="*", testcase_pattern="*"): """ Run testcases matching the given patterns and yield testcase reports. :param testsuite_pattern: Filter pattern for testsuite level. :type testsuite_pattern: ``str`` :param testcase_pattern: Filter pattern for testcase level. :type testsuite_pattern: ``str`` :yield: generate tuples containing testcase reports and a list of the UIDs required to merge this into the main report tree, starting with the UID of this test. """ if not self._nodeids: # Need to collect the tests so we know the nodeids for each # testsuite/case. 
            self.dry_run()

        test_report = self._new_test_report()
        quiet = not self._debug_logging_enabled
        pytest_plugin = _ReportPlugin(self, test_report, quiet)
        pytest_plugin.setup()

        pytest_args = self._build_iter_pytest_args(
            testsuite_pattern, testcase_pattern
        )
        self.logger.debug("Running PyTest with args: %r", pytest_args)
        return_code = pytest.main(pytest_args, plugins=[pytest_plugin])
        self.logger.debug("Pytest exit code: %d", return_code)

        for suite_report in test_report:
            for child_report in suite_report:
                if isinstance(child_report, TestCaseReport):
                    yield (
                        child_report,
                        [test_report.uid, suite_report.uid],
                    )
                elif isinstance(child_report, TestGroupReport):
                    if child_report.category != "parametrization":
                        raise RuntimeError(
                            "Unexpected report category: {}".format(
                                child_report.category
                            )
                        )
                    for testcase_report in child_report:
                        yield (
                            testcase_report,
                            [
                                test_report.uid,
                                suite_report.uid,
                                child_report.uid,
                            ],
                        )
                else:
                    raise TypeError(
                        "Unexpected report type: {}".format(
                            type(child_report)
                        )
                    )

    def _build_iter_pytest_args(self, testsuite_pattern, testcase_pattern):
        """
        Build the PyTest args for running a particular set of testsuites and
        testcases as specified.
        """
        if self._nodeids is None:
            raise RuntimeError("Need to call dry_run() first")

        if testsuite_pattern == "*" and testcase_pattern == "*":
            if isinstance(self.cfg.target, six.string_types):
                pytest_args = [self.cfg.target]
            else:
                pytest_args = self.cfg.target[:]
        elif testcase_pattern == "*":
            pytest_args = [self._nodeids["testsuites"][testsuite_pattern]]
        else:
            pytest_args = [
                self._nodeids["testcases"][testsuite_pattern][testcase_pattern]
            ]

        if self.cfg.extra_args:
            pytest_args.extend(self.cfg.extra_args)

        return pytest_args

    def _build_pytest_args(self):
        """
        :return: a list of the args to be passed to PyTest
        :rtype: List[str]
        """
        if isinstance(self.cfg.target, six.string_types):
            pytest_args = [self.cfg.target]
        else:
            pytest_args = self.cfg.target[:]

        if self.cfg.select:
            pytest_args.extend(["-k", self.cfg.select])

        if self.cfg.extra_args:
            pytest_args.extend(self.cfg.extra_args)

        return pytest_args


class _ReportPlugin(object):
    """
    Plugin object passed to PyTest. Contains hooks used to update the Testplan
    report with the status of testcases.
    """

    def __init__(self, parent, report, quiet):
        self._parent = parent
        self._report = report
        self._quiet = quiet

        # Collection of suite reports - will be initialised by the setup()
        # method.
        self._suite_reports = None

        # The current working testcase report. It needs to be stored on this
        # object since it is set and read by different callback hooks.
        self._current_case_report = None

        # Result object which supports various assertions like in MultiTest.
        # Its entries will later be added to current testcase report.
        self._current_result_obj = None

        # Create fixture function for interface
        self._fixtures_init()

    def _fixtures_init(self):
        """
        Register fixtures with pytest.
        """

        @pytest.fixture
        def result():
            """
            Return the result object for the current test case.

            :return: the result object for the current test case
            :rtype: ``Result``
            """
            return self._current_result_obj

        @pytest.fixture
        def env():
            """
            Return the testing environment.
:return: the testing environment :rtype: ``Environment`` """ return self._parent.resources # PyTest picks up fixtures from all files it loads (including plugins) self.result = result self.env = env def setup(self): """Set up environment as required.""" self._suite_reports = collections.defaultdict(collections.OrderedDict) def case_report(self, suite_name, case_name, case_params): """ Return the case report for the specified suite and case name, creating it first if necessary. :param suite_name: the suite name to get the report for :type suite_name: ``str`` :param case_name: the case name to get the report for :type case_name: ``str`` :param case_params: the case parameters to get the report for :type case_params: ``str`` or ``NoneType`` :return: the case report :rtype: :py:class:`testplan.report.testing.TestCaseReport` """ if case_params is None: report = self._suite_reports[suite_name].get(case_name) if report is None: report = TestCaseReport(case_name, uid=case_name) self._suite_reports[suite_name][case_name] = report return report else: group_report = self._suite_reports[suite_name].get(case_name) if group_report is None: # create group report for parametrized testcases group_report = TestGroupReport( name=case_name, uid=case_name, category="parametrization" ) self._suite_reports[suite_name][case_name] = group_report case_name = "{}[{}]".format(case_name, case_params) try: report = group_report.get_by_uid(case_name) except: # create report of parametrized testcase report = TestCaseReport(case_name, uid=case_name) group_report.append(report) return report def pytest_runtest_setup(self, item): """ Hook called by pytest to set up a test. :param item: the test item to set up (see pytest documentation) """ # Extract suite name, case name and parameters suite_name, case_name, case_params = _case_parse(item.nodeid) report = self.case_report(suite_name, case_name, case_params) try: func_doc = item.function.__doc__ except AttributeError: func_doc = None if func_doc is not None: report.description = os.linesep.join( " {}".format(line) for line in inspect.getdoc(item.function).split(os.linesep) ) self._current_case_report = report self._current_result_obj = self._parent.cfg.result( stdout_style=self._parent.stdout_style, _scratch=self._parent.scratch, ) def pytest_runtest_teardown(self, item): """ Hook called by pytest to tear down a test. :param item: the test item to tear down (see pytest documentation) """ self._current_case_report = None self._current_result_obj = None def pytest_runtest_logreport(self, report): """ Hook called by pytest to report on the result of a test. :param report: the test report for the item just tested (see pytest
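# --- Illustrative sketch (separate script, shown here for context) ---
# How the PyTest runner defined above is typically consumed: add an instance
# to a Testplan plan. The plan name and target path are hypothetical;
# @test_plan and plan.add() are the standard Testplan entry points, and the
# PyTest import path matches the docstring reference above.
import sys

from testplan import test_plan
from testplan.testing.py_test import PyTest


@test_plan(name="PyTestExample")
def main(plan):
    plan.add(
        PyTest(
            name="My PyTest",
            target="tests/my_pytest_tests.py",  # hypothetical test file
            select="not slow",                  # forwarded to pytest's -k filter
        )
    )


if __name__ == "__main__":
    sys.exit(not main())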
#!/usr/bin/env python
"""An interactive kernel that talks to frontends over 0MQ."""

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function

# Standard library imports
import sys
import time
import traceback
import logging
import uuid

from datetime import datetime
from signal import (
    signal, default_int_handler, SIGINT
)

# System library imports
import zmq
from zmq.eventloop import ioloop
from zmq.eventloop.zmqstream import ZMQStream

# Local imports
from IPython.config.configurable import Configurable
from IPython.core.error import StdinNotImplementedError
from IPython.core import release
from IPython.utils import py3compat
from IPython.utils.py3compat import builtin_mod, unicode_type, string_types
from IPython.utils.jsonutil import json_clean
from IPython.utils.traitlets import (
    Any, Instance, Float, Dict, List, Set, Integer, Unicode, Type, Bool,
)

from .serialize import serialize_object, unpack_apply_message
from .session import Session
from .zmqshell import ZMQInteractiveShell

#-----------------------------------------------------------------------------
# Main kernel class
#-----------------------------------------------------------------------------

protocol_version = list(release.kernel_protocol_version_info)
ipython_version = list(release.version_info)
language_version = list(sys.version_info[:3])


class Kernel(Configurable):

    #---------------------------------------------------------------------------
    # Kernel interface
    #---------------------------------------------------------------------------

    # attribute to override with a GUI
    eventloop = Any(None)
    def _eventloop_changed(self, name, old, new):
        """schedule call to eventloop from IOLoop"""
        loop = ioloop.IOLoop.instance()
        loop.add_callback(self.enter_eventloop)

    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
    shell_class = Type(ZMQInteractiveShell)

    session = Instance(Session)
    profile_dir = Instance('IPython.core.profiledir.ProfileDir')
    shell_streams = List()
    control_stream = Instance(ZMQStream)
    iopub_socket = Instance(zmq.Socket)
    stdin_socket = Instance(zmq.Socket)
    log = Instance(logging.Logger)

    user_module = Any()
    def _user_module_changed(self, name, old, new):
        if self.shell is not None:
            self.shell.user_module = new

    user_ns = Instance(dict, args=None, allow_none=True)
    def _user_ns_changed(self, name, old, new):
        if self.shell is not None:
            self.shell.user_ns = new
            self.shell.init_user_ns()

    # identities:
    int_id = Integer(-1)
    ident = Unicode()

    def _ident_default(self):
        return unicode_type(uuid.uuid4())

    # Private interface

    _darwin_app_nap = Bool(True, config=True,
        help="""Whether to use appnope for compatibility with OS X App Nap.

        Only affects OS X >= 10.9.
        """
    )

    # Time to sleep after flushing the stdout/err buffers in each execute
    # cycle.  While this introduces a hard limit on the minimal latency of the
    # execute cycle, it helps prevent output synchronization problems for
    # clients.
    # Units are in seconds.  The minimum zmq latency on local host is probably
    # ~150 microseconds, set this to 500us for now.  We may need to increase it
    # a little if it's not enough after more interactive testing.
    _execute_sleep = Float(0.0005, config=True)

    # Frequency of the kernel's event loop.
    # Units are in seconds, kernel subclasses for GUI toolkits may need to
    # adapt to milliseconds.
_poll_interval = Float(0.05, config=True) # If the shutdown was requested over the network, we leave here the # necessary reply message so it can be sent by our registered atexit # handler. This ensures that the reply is only sent to clients truly at # the end of our shutdown process (which happens after the underlying # IPython shell's own shutdown). _shutdown_message = None # This is a dict of port number that the kernel is listening on. It is set # by record_ports and used by connect_request. _recorded_ports = Dict() # A reference to the Python builtin 'raw_input' function. # (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3) _sys_raw_input = Any() _sys_eval_input = Any() # set of aborted msg_ids aborted = Set() def __init__(self, **kwargs): super(Kernel, self).__init__(**kwargs) # Initialize the InteractiveShell subclass self.shell = self.shell_class.instance(parent=self, profile_dir = self.profile_dir, user_module = self.user_module, user_ns = self.user_ns, kernel = self, ) self.shell.displayhook.session = self.session self.shell.displayhook.pub_socket = self.iopub_socket self.shell.displayhook.topic = self._topic('pyout') self.shell.display_pub.session = self.session self.shell.display_pub.pub_socket = self.iopub_socket self.shell.data_pub.session = self.session self.shell.data_pub.pub_socket = self.iopub_socket # TMP - hack while developing self.shell._reply_content = None # Build dict of handlers for message types msg_types = [ 'execute_request', 'complete_request', 'object_info_request', 'history_request', 'kernel_info_request', 'connect_request', 'shutdown_request', 'apply_request', ] self.shell_handlers = {} for msg_type in msg_types: self.shell_handlers[msg_type] = getattr(self, msg_type) comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ] comm_manager = self.shell.comm_manager for msg_type in comm_msg_types: self.shell_handlers[msg_type] = getattr(comm_manager, msg_type) control_msg_types = msg_types + [ 'clear_request', 'abort_request' ] self.control_handlers = {} for msg_type in control_msg_types: self.control_handlers[msg_type] = getattr(self, msg_type) def dispatch_control(self, msg): """dispatch control requests""" idents,msg = self.session.feed_identities(msg, copy=False) try: msg = self.session.unserialize(msg, content=True, copy=False) except: self.log.error("Invalid Control Message", exc_info=True) return self.log.debug("Control received: %s", msg) header = msg['header'] msg_id = header['msg_id'] msg_type = header['msg_type'] handler = self.control_handlers.get(msg_type, None) if handler is None: self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type) else: try: handler(self.control_stream, idents, msg) except Exception: self.log.error("Exception in control handler:", exc_info=True) def dispatch_shell(self, stream, msg): """dispatch shell requests""" # flush control requests first if self.control_stream: self.control_stream.flush() idents,msg = self.session.feed_identities(msg, copy=False) try: msg = self.session.unserialize(msg, content=True, copy=False) except: self.log.error("Invalid Message", exc_info=True) return header = msg['header'] msg_id = header['msg_id'] msg_type = msg['header']['msg_type'] # Print some info about this message and leave a '--->' marker, so it's # easier to trace visually the message chain when debugging. Each # handler prints its message at the end. 
self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type) self.log.debug(' Content: %s\n --->\n ', msg['content']) if msg_id in self.aborted: self.aborted.remove(msg_id) # is it safe to assume a msg_id will not be resubmitted? reply_type = msg_type.split('_')[0] + '_reply' status = {'status' : 'aborted'} md = {'engine' : self.ident} md.update(status) reply_msg = self.session.send(stream, reply_type, metadata=md, content=status, parent=msg, ident=idents) return handler = self.shell_handlers.get(msg_type, None) if handler is None: self.log.error("UNKNOWN MESSAGE TYPE: %r", msg_type) else: # ensure default_int_handler during handler call sig = signal(SIGINT, default_int_handler) try: handler(stream, idents, msg) except Exception: self.log.error("Exception in message handler:", exc_info=True) finally: signal(SIGINT, sig) def enter_eventloop(self): """enter eventloop""" self.log.info("entering eventloop %s", self.eventloop) for stream in self.shell_streams: # flush any pending replies, # which may be skipped by entering the eventloop stream.flush(zmq.POLLOUT) # restore default_int_handler signal(SIGINT, default_int_handler) while self.eventloop is not None: try: self.eventloop(self) except KeyboardInterrupt: # Ctrl-C shouldn't crash the kernel self.log.error("KeyboardInterrupt caught in kernel") continue else: # eventloop exited cleanly, this means we should stop (right?) self.eventloop = None break self.log.info("exiting eventloop") def start(self): """register dispatchers for streams""" self.shell.exit_now = False if self.control_stream: self.control_stream.on_recv(self.dispatch_control, copy=False) def make_dispatcher(stream): def dispatcher(msg): return self.dispatch_shell(stream, msg) return dispatcher for s in self.shell_streams: s.on_recv(make_dispatcher(s), copy=False) # publish idle status self._publish_status('starting') def do_one_iteration(self): """step eventloop just once""" if self.control_stream: self.control_stream.flush() for stream in self.shell_streams: # handle at most one request per iteration stream.flush(zmq.POLLIN, 1) stream.flush(zmq.POLLOUT) def record_ports(self, ports): """Record the ports that this kernel is using. The creator of the Kernel instance must call this methods if they want the :meth:`connect_request` method to return the port numbers. 
""" self._recorded_ports = ports #--------------------------------------------------------------------------- # Kernel request handlers #--------------------------------------------------------------------------- def _make_metadata(self, other=None): """init metadata dict, for execute/apply_reply""" new_md = { 'dependencies_met' : True, 'engine' : self.ident, 'started': datetime.now(), } if other: new_md.update(other) return new_md def _publish_pyin(self, code, parent, execution_count): """Publish the code request on the pyin stream.""" self.session.send(self.iopub_socket, u'pyin', {u'code':code, u'execution_count': execution_count}, parent=parent, ident=self._topic('pyin') ) def _publish_status(self, status, parent=None): """send status (busy/idle) on IOPub""" self.session.send(self.iopub_socket, u'status', {u'execution_state': status}, parent=parent, ident=self._topic('status'), ) def execute_request(self, stream, ident, parent): """handle an execute_request""" self._publish_status(u'busy', parent) try: content = parent[u'content'] code = py3compat.cast_unicode_py2(content[u'code']) silent = content[u'silent'] store_history = content.get(u'store_history', not silent) except: self.log.error("Got bad msg: ") self.log.error("%s", parent) return md = self._make_metadata(parent['metadata']) shell = self.shell # we'll need this a lot here # Replace raw_input. Note that is not sufficient to replace # raw_input in the user namespace. if content.get('allow_stdin', False): raw_input = lambda prompt='': self._raw_input(prompt, ident, parent) input = lambda prompt='': eval(raw_input(prompt)) else: raw_input = input = lambda prompt='' : self._no_raw_input() if py3compat.PY3: self._sys_raw_input = builtin_mod.input builtin_mod.input = raw_input else: self._sys_raw_input = builtin_mod.raw_input self._sys_eval_input = builtin_mod.input builtin_mod.raw_input = raw_input builtin_mod.input = input # Set the parent message of the display hook and out streams. shell.set_parent(parent) # Re-broadcast our input for the benefit of listening clients, and # start computing output if not silent: self._publish_pyin(code, parent, shell.execution_count) reply_content = {} # FIXME: the shell calls the exception handler itself. shell._reply_content = None try: shell.run_cell(code, store_history=store_history, silent=silent) except: status = u'error' # FIXME: this code right now isn't being used yet by default, # because the run_cell() call above directly fires off exception # reporting. This code, therefore, is only active in the scenario # where runlines itself has an unhandled exception. We need to # uniformize this, for all exception construction to come from a # single location in the codbase. etype, evalue, tb
"""
Package for command functions and their helpers

These functions handle the real work of NPC. They can be called on their own
without going through the CLI.
"""

import json
from collections import Counter
from os import makedirs, rmdir, getcwd
from pathlib import Path
from shutil import move as shmove
import itertools

import npc
from npc import formatters, linters, parser, settings
from npc.util import flatten, result
from npc.character import Character, CharacterEncoder

from . import create_character, listing, util, story


def reorg(*search, ignore=None, purge=False, verbose=False, commit=False, **kwargs):
    """
    Move character files into the correct paths.

    Character files are moved so that their path matches the ideal path as
    closely as possible. No new directories are created.

    Args:
        search (list): Paths to search for character files. Items can be
            strings or lists of strings.
        ignore (list): Paths to ignore
        purge (bool): Whether empty directories should be deleted after all
            files have been moved.
        verbose (bool): Whether to print changes as they are made
        commit (bool): Whether to actually move files around
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. Openable will be empty.
    """
    prefs = kwargs.get('prefs', settings.InternalSettings())
    if not ignore:
        ignore = []
    ignore.extend(prefs.get_ignored_paths('reorg'))

    show_changes = verbose or not commit
    changelog = []

    base_path = Path(prefs.get('paths.required.characters'))
    if not base_path.exists():
        return result.FSError(errmsg="Cannot access '{}'".format(base_path))

    if show_changes:
        changelog.append("Move characters")
    for parsed_character in parser.get_characters(flatten(search), ignore):
        if parsed_character.tags('keep').present:
            continue

        new_path = Path(util.create_path_from_character(parsed_character, base_path=base_path))
        parsed_path = Path(parsed_character.path)
        if not new_path.resolve().samefile(parsed_path.resolve()):
            if show_changes:
                changelog.append("* Move {} to {}".format(parsed_path, new_path))
            if commit:
                try:
                    shmove(str(parsed_path), new_path)
                except OSError as e:
                    if show_changes:
                        changelog.append("\t- dest path already exists; skipping")

    if purge:
        if show_changes:
            changelog.append("Purge empty directories")
        for empty_path in util.find_empty_dirs(base_path):
            if show_changes:
                changelog.append("* Remove empty directory {}".format(empty_path))
            if commit:
                rmdir(empty_path)

    return result.Success(printables=changelog)


def dump(*search, ignore=None, do_sort=False, metadata=False, outfile=None, **kwargs):
    """
    Dump the raw character data, unaltered.

    Always formats the data as json.

    Args:
        search (List): Paths to search for character files
        ignore (List): Paths to ignore
        do_sort (bool): Whether to sort the characters before dumping
        metadata (bool): Whether to prepend metadata to the output
        outfile (string|None): Filename to put the dumped data. None and "-"
            print to stdout.
        prefs (Settings): Settings object to use. Uses internal settings by
            default.

    Returns:
        Result object. If outfile pointed to a real file, the openable
        attribute will contain that filename.
""" prefs = kwargs.get('prefs', settings.InternalSettings()) if not ignore: ignore = [] ignore.extend(prefs.get_ignored_paths('dump')) sort_by = kwargs.get('sort_by', prefs.get('dump.sort_by')) characters = parser.get_characters(flatten(search), ignore) if do_sort: sorter = util.character_sorter.CharacterSorter(sort_by, prefs=prefs) characters = sorter.sort(characters) characters = [c.dump() for c in characters] # make some json if metadata: meta = { 'meta': True, **prefs.get_metadata('json') } characters = itertools.chain([meta], characters) with util.smart_open(outfile) as outstream: json.dump([c for c in characters], outstream, cls=CharacterEncoder) openable = [outfile] if outfile and outfile != '-' else None return result.Success(openable=openable) def lint(*search, ignore=None, fix=False, strict=False, report=True, **kwargs): """ Check character files for completeness and correctness. This function checks that every character file has a few required tags, and applies extra checking for some character types. See util.Character.validate for details. This command normally ignores unknown tags. In strict mode, it will report the presence of any tag not expected by the character class. Args: search (list): Paths to search for character files. Items can be strings or lists of strings. ignore (list): Paths to ignore fix (bool): Whether to automatically fix errors when possible strict (bool): Whether to include non-critical errors and omissions report (bool): Do not include files in the return data, only problem descriptions prefs (Settings): Settings object to use. Uses internal settings by default. Returns: Result object. On success, openable attribute will contain a list of all files that had errors. """ prefs = kwargs.get('prefs', settings.InternalSettings()) if not ignore: ignore = [] ignore.extend(prefs.get_ignored_paths('lint')) openable = [] printable = [] # check each character characters = parser.get_characters(flatten(search), ignore) for character in characters: if character.tags('nolint').present: continue character.validate(strict) character.problems.extend(linters.lint(character, fix=fix, strict=strict, prefs=prefs)) # Report problems on one line if possible, or as a block if there's more than one if not character.valid: charpath = character.path if not report: openable.append(charpath) if len(character.problems) > 1: printable.append("File '{}':".format(charpath)) for detail in character.problems: printable.append(" {}".format(detail)) else: printable.append("{} in '{}'".format(character.problems[0], charpath)) return result.Success(openable=openable, printables=printable) def init(create_types=False, create_all=False, **kwargs): """ Create the basic directories for a campaign. This will create the directories this tool expects to find within a campaign. Other directories are left to the user. Args: create_types (bool): Whether to create directories for each character type create_all (bool): Whether to create all optional directories. campaign_name (str): Name of the campaign. Defaults to the name of the current directory. dryrun (bool): Do not create anything. This adds a string of changes that would be made to the returned Result object's printables variable. verbose (bool): Detail all changes made in the Result object's printables variable. prefs (Settings): Settings object to use. Uses internal settings by default. Returns: Result object. Openable will be empty. 
""" prefs = kwargs.get('prefs', settings.InternalSettings()) campaign_name = kwargs.get('campaign_name', Path.cwd().name) dryrun = kwargs.get('dryrun', False) verbose = kwargs.get('verbose', False) changelog = [] def log_change(message): if dryrun or verbose: changelog.append(message) def new_dir(path_name): log_change(path_name) if not dryrun: makedirs(path_name, mode=0o775, exist_ok=True) for key, required_path in prefs.get('paths.required').items(): if key in ["additional_paths"]: # create user-specified dirs for extra_path in required_path: new_dir(extra_path) continue new_dir(required_path) if not prefs.get_settings_path('campaign').exists(): new_dir('.npc') log_change(prefs.get_settings_path('campaign')) if not dryrun: with open(prefs.get_settings_path('campaign'), 'a') as settings_file: json.dump({'campaign_name': campaign_name}, settings_file, indent=4) if create_types or create_all: cbase = Path(prefs.get('paths.required.characters')) for type_path in prefs.get_type_paths(): new_dir(cbase.joinpath(type_path)) return result.Success(printables=changelog) def open_settings(location, show_defaults=False, settings_type=None, **kwargs): """ Open the named settings file. If the desired settings file does not exist, an empty file is created and then opened. Args: location (str): Which settings file to open. One of 'user' or 'campaign'. show_defaults (bool): Whether the default settings file should be opened for reference alongside the specified settings file. settings_type (str): Determines which kind of settings file to open, like base settings or changeling settings. If left unset, base settings are opened. One of 'base' or 'changeling'. prefs (Settings): Settings object to use. Uses internal settings by default. Returns: Result object. Openable will contain the desired settings file. If true was passed in show_defaults, it will also contain the reference settings file. """ prefs = kwargs.get('prefs', settings.InternalSettings()) if settings_type: settings_type = settings_type.lower() target_path = prefs.get_settings_path(location, settings_type) if not target_path.exists(): dirname = target_path.parent makedirs(dirname, mode=0o775, exist_ok=True) with open(target_path, 'a') as settings_file: settings_file.write('{}') if show_defaults: openable = [prefs.get_settings_path('default', settings_type, target_path.suffix), target_path] else: openable = [target_path] return result.Success(openable=openable) def report(*tags, search=None, ignore=None, fmt=None, outfile=None, **kwargs): """ Create a report for the given tags The tabular report shows how many characters have each unique value for each tag. Args: tag (list): Tag names to report on. Can contain strings and lists of strings. search (list): Paths to search for character files. Items can be strings or lists of strings. ignore (list): Paths to ignore fmt (str|None): Output format to use. Recognized values are defined in formatters.get_report_formatter. Pass "default" or None to get the format from settings. outfile (string|None): Filename to put the listed data. None and "-" print to stdout. prefs (Settings): Settings object to use. Uses internal settings by default. Returns: Result object. Openable will contain the output file if given. 
""" prefs = kwargs.get('prefs', settings.InternalSettings()) if not search: search = ['.'] if not ignore: ignore = [] ignore.extend(prefs.get_ignored_paths('report')) if not fmt or fmt == 'default': fmt = prefs.get('report.default_format') # use a list so we can iterate more than once characters = list(parser.get_characters(flatten(search), ignore)) # Construct a dict keyed by tag name whose values are Counters. Each Counter # is initialized with a flattened list of lists and we let it count the # duplicates. table_data = {tag : Counter(flatten([c.tags.get(tag, 'None') for c in characters])) for tag in flatten(tags)} formatter = formatters.get_report_formatter(fmt) if not formatter: return result.OptionError(errmsg="Cannot create output of format '{}'".format(fmt)) with util.smart_open(outfile, binary=(fmt in formatters.BINARY_TYPES)) as outstream: response = formatter(table_data, outstream=outstream, prefs=prefs) # pass errors straight through if not response.success: return response
= tf.shape(padded_images) with self.test_session() as sess: (boxes_shape_, padded_boxes_shape_, images_shape_, padded_images_shape_, boxes_, padded_boxes_) = sess.run( [boxes_shape, padded_boxes_shape, images_shape, padded_images_shape, boxes, padded_boxes]) self.assertAllEqual(boxes_shape_, padded_boxes_shape_) self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all) self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( padded_boxes_[:, 2] - padded_boxes_[:, 0]))) self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( padded_boxes_[:, 3] - padded_boxes_[:, 1]))) def testRandomCropPadImageWithCache(self): preprocess_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1,}), (preprocessor.random_crop_pad_image, {})] self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRandomCropPadImageWithRandomCoefOne(self): preprocessing_options = [(preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })] images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, } tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_crop_pad_image, { 'random_coef': 1.0 })] padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) padded_images = padded_tensor_dict[fields.InputDataFields.image] padded_boxes = padded_tensor_dict[ fields.InputDataFields.groundtruth_boxes] boxes_shape = tf.shape(boxes) padded_boxes_shape = tf.shape(padded_boxes) images_shape = tf.shape(images) padded_images_shape = tf.shape(padded_images) with self.test_session() as sess: (boxes_shape_, padded_boxes_shape_, images_shape_, padded_images_shape_, boxes_, padded_boxes_) = sess.run( [boxes_shape, padded_boxes_shape, images_shape, padded_images_shape, boxes, padded_boxes]) self.assertAllEqual(boxes_shape_, padded_boxes_shape_) self.assertTrue((images_shape_[1] >= padded_images_shape_[1] * 0.5).all) self.assertTrue((images_shape_[2] >= padded_images_shape_[2] * 0.5).all) self.assertTrue((images_shape_[1] <= padded_images_shape_[1]).all) self.assertTrue((images_shape_[2] <= padded_images_shape_[2]).all) self.assertTrue(np.all((boxes_[:, 2] - boxes_[:, 0]) >= ( padded_boxes_[:, 2] - padded_boxes_[:, 0]))) self.assertTrue(np.all((boxes_[:, 3] - boxes_[:, 1]) >= ( padded_boxes_[:, 3] - padded_boxes_[:, 1]))) def testRandomCropToAspectRatio(self): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, } tensor_dict = preprocessor.preprocess(tensor_dict, []) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_crop_to_aspect_ratio, { 'aspect_ratio': 2.0 })] cropped_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) cropped_images = 
cropped_tensor_dict[fields.InputDataFields.image] cropped_boxes = cropped_tensor_dict[ fields.InputDataFields.groundtruth_boxes] boxes_shape = tf.shape(boxes) cropped_boxes_shape = tf.shape(cropped_boxes) images_shape = tf.shape(images) cropped_images_shape = tf.shape(cropped_images) with self.test_session() as sess: (boxes_shape_, cropped_boxes_shape_, images_shape_, cropped_images_shape_) = sess.run([ boxes_shape, cropped_boxes_shape, images_shape, cropped_images_shape ]) self.assertAllEqual(boxes_shape_, cropped_boxes_shape_) self.assertEqual(images_shape_[1], cropped_images_shape_[1] * 2) self.assertEqual(images_shape_[2], cropped_images_shape_[2]) def testRandomPadToAspectRatio(self): images = self.createTestImages() boxes = self.createTestBoxes() labels = self.createTestLabels() tensor_dict = { fields.InputDataFields.image: images, fields.InputDataFields.groundtruth_boxes: boxes, fields.InputDataFields.groundtruth_classes: labels, } tensor_dict = preprocessor.preprocess(tensor_dict, []) images = tensor_dict[fields.InputDataFields.image] preprocessing_options = [(preprocessor.random_pad_to_aspect_ratio, { 'aspect_ratio': 2.0 })] padded_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) padded_images = padded_tensor_dict[fields.InputDataFields.image] padded_boxes = padded_tensor_dict[ fields.InputDataFields.groundtruth_boxes] boxes_shape = tf.shape(boxes) padded_boxes_shape = tf.shape(padded_boxes) images_shape = tf.shape(images) padded_images_shape = tf.shape(padded_images) with self.test_session() as sess: (boxes_shape_, padded_boxes_shape_, images_shape_, padded_images_shape_) = sess.run([ boxes_shape, padded_boxes_shape, images_shape, padded_images_shape ]) self.assertAllEqual(boxes_shape_, padded_boxes_shape_) self.assertEqual(images_shape_[1], padded_images_shape_[1]) self.assertEqual(2 * images_shape_[2], padded_images_shape_[2]) def testRandomBlackPatchesWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_black_patches, { 'size_to_image_ratio': 0.5 })) self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def testRandomBlackPatches(self): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_black_patches, { 'size_to_image_ratio': 0.5 })) images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} blacked_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) blacked_images = blacked_tensor_dict[fields.InputDataFields.image] images_shape = tf.shape(images) blacked_images_shape = tf.shape(blacked_images) with self.test_session() as sess: (images_shape_, blacked_images_shape_) = sess.run( [images_shape, blacked_images_shape]) self.assertAllEqual(images_shape_, blacked_images_shape_) def testRandomResizeMethodWithCache(self): preprocess_options = [] preprocess_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocess_options.append((preprocessor.random_resize_method, { 'target_size': (75, 150) })) self._testPreprocessorCache(preprocess_options, test_boxes=True, test_masks=True, test_keypoints=True) def 
testRandomResizeMethod(self): preprocessing_options = [] preprocessing_options.append((preprocessor.normalize_image, { 'original_minval': 0, 'original_maxval': 255, 'target_minval': 0, 'target_maxval': 1 })) preprocessing_options.append((preprocessor.random_resize_method, { 'target_size': (75, 150) })) images = self.createTestImages() tensor_dict = {fields.InputDataFields.image: images} resized_tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options) resized_images = resized_tensor_dict[fields.InputDataFields.image] resized_images_shape = tf.shape(resized_images) expected_images_shape = tf.constant([1, 75, 150, 3], dtype=tf.int32) with self.test_session() as sess: (expected_images_shape_, resized_images_shape_) = sess.run( [expected_images_shape, resized_images_shape]) self.assertAllEqual(expected_images_shape_, resized_images_shape_) def testResizeImageWithMasks(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] height = 50 width = 100 expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_image( in_image, in_masks, new_height=height, new_width=width) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) with self.test_session() as sess: out_image_shape, out_masks_shape = sess.run( [out_image_shape, out_masks_shape]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeImageWithMasksTensorInputHeightAndWidth(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] height = tf.constant(50, dtype=tf.int32) width = tf.constant(100, dtype=tf.int32) expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] expected_masks_shape_list = [[15, 50, 100], [10, 50, 100]] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_image( in_image, in_masks, new_height=height, new_width=width) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) with self.test_session() as sess: out_image_shape, out_masks_shape = sess.run( [out_image_shape, out_masks_shape]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeImageWithNoInstanceMask(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[0, 60, 40], [0, 15, 30]] height = 50 width = 100 expected_image_shape_list = [[50, 100, 3], [50, 100, 3]] expected_masks_shape_list = [[0, 50, 100], [0, 50, 100]] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): in_image = tf.random_uniform(in_image_shape) in_masks = 
tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_image( in_image, in_masks, new_height=height, new_width=width) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) with self.test_session() as sess: out_image_shape, out_masks_shape = sess.run( [out_image_shape, out_masks_shape]) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToRangePreservesStaticSpatialShape(self): """Tests image resizing, checking output sizes.""" in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] min_dim = 50 max_dim = 100 expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]] for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): in_image = tf.random_uniform(in_shape) out_image, _ = preprocessor.resize_to_range( in_image, min_dimension=min_dim, max_dimension=max_dim) self.assertAllEqual(out_image.get_shape().as_list(), expected_shape) def testResizeToRangeWithDynamicSpatialShape(self): """Tests image resizing, checking output sizes.""" in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] min_dim = 50 max_dim = 100 expected_shape_list = [[75, 50, 3], [50, 100, 3], [30, 100, 3]] for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): in_image = tf.placeholder(tf.float32, shape=(None, None, 3)) out_image, _ = preprocessor.resize_to_range( in_image, min_dimension=min_dim, max_dimension=max_dim) out_image_shape = tf.shape(out_image) with self.test_session() as sess: out_image_shape = sess.run(out_image_shape, feed_dict={in_image: np.random.randn(*in_shape)}) self.assertAllEqual(out_image_shape, expected_shape) def testResizeToRangeWithPadToMaxDimensionReturnsCorrectShapes(self): in_shape_list = [[60, 40, 3], [15, 30, 3], [15, 50, 3]] min_dim = 50 max_dim = 100 expected_shape_list = [[100, 100, 3], [100, 100, 3], [100, 100, 3]] for in_shape, expected_shape in zip(in_shape_list, expected_shape_list): in_image = tf.placeholder(tf.float32, shape=(None, None, 3)) out_image, _ = preprocessor.resize_to_range( in_image, min_dimension=min_dim, max_dimension=max_dim, pad_to_max_dimension=True) self.assertAllEqual(out_image.shape.as_list(), expected_shape) out_image_shape = tf.shape(out_image) with self.test_session() as sess: out_image_shape = sess.run( out_image_shape, feed_dict={in_image: np.random.randn(*in_shape)}) self.assertAllEqual(out_image_shape, expected_shape) def testResizeToRangeWithPadToMaxDimensionReturnsCorrectTensor(self): in_image_np = np.array([[[0, 1, 2]]], np.float32) ex_image_np = np.array( [[[0, 1, 2], [123.68, 116.779, 103.939]], [[123.68, 116.779, 103.939], [123.68, 116.779, 103.939]]], np.float32) min_dim = 1 max_dim = 2 in_image = tf.placeholder(tf.float32, shape=(None, None, 3)) out_image, _ = preprocessor.resize_to_range( in_image, min_dimension=min_dim, max_dimension=max_dim, pad_to_max_dimension=True, per_channel_pad_value=(123.68, 116.779, 103.939)) with self.test_session() as sess: out_image_np = sess.run(out_image, feed_dict={in_image: in_image_np}) self.assertAllClose(ex_image_np, out_image_np) def testResizeToRangeWithMasksPreservesStaticSpatialShape(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] min_dim = 50 max_dim = 100 expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] expected_masks_shape_list = [[15, 75, 50], [10, 50, 100]] for (in_image_shape, expected_image_shape, in_masks_shape, 
expected_mask_shape) in zip(in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): in_image = tf.random_uniform(in_image_shape) in_masks = tf.random_uniform(in_masks_shape) out_image, out_masks, _ = preprocessor.resize_to_range( in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim) self.assertAllEqual(out_masks.get_shape().as_list(), expected_mask_shape) self.assertAllEqual(out_image.get_shape().as_list(), expected_image_shape) def testResizeToRangeWithMasksAndPadToMaxDimension(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] min_dim = 50 max_dim = 100 expected_image_shape_list = [[100, 100, 3], [100, 100, 3]] expected_masks_shape_list = [[15, 100, 100], [10, 100, 100]] for (in_image_shape, expected_image_shape, in_masks_shape, expected_mask_shape) in zip( in_image_shape_list, expected_image_shape_list, in_masks_shape_list, expected_masks_shape_list): in_image = tf.placeholder(tf.float32, shape=(None, None, 3)) in_masks = tf.placeholder(tf.float32, shape=(None, None, None)) out_image, out_masks, _ = preprocessor.resize_to_range( in_image, in_masks, min_dimension=min_dim, max_dimension=max_dim, pad_to_max_dimension=True) out_image_shape = tf.shape(out_image) out_masks_shape = tf.shape(out_masks) with self.test_session() as sess: out_image_shape, out_masks_shape = sess.run( [out_image_shape, out_masks_shape], feed_dict={ in_image: np.random.randn(*in_image_shape), in_masks: np.random.randn(*in_masks_shape) }) self.assertAllEqual(out_image_shape, expected_image_shape) self.assertAllEqual(out_masks_shape, expected_mask_shape) def testResizeToRangeWithMasksAndDynamicSpatialShape(self): """Tests image resizing, checking output sizes.""" in_image_shape_list = [[60, 40, 3], [15, 30, 3]] in_masks_shape_list = [[15, 60, 40], [10, 15, 30]] min_dim = 50 max_dim = 100 expected_image_shape_list = [[75, 50, 3], [50, 100, 3]] expected_masks_shape_list =
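# --- Illustrative sketch: the preprocess() call pattern the tests above use ---
# Build a tensor_dict keyed by fields.InputDataFields plus a list of
# (function, kwargs) options, then evaluate in a session (TF1 style, as in
# the tests). The option tuple below is taken from testRandomResizeMethod;
# the object_detection import paths are stated as assumptions.
import tensorflow as tf
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields

image = tf.random_uniform([1, 60, 40, 3])
tensor_dict = {fields.InputDataFields.image: image}
options = [(preprocessor.random_resize_method, {'target_size': (75, 150)})]
resized_dict = preprocessor.preprocess(tensor_dict, options)

with tf.Session() as sess:
    out = sess.run(resized_dict[fields.InputDataFields.image])
    assert out.shape == (1, 75, 150, 3)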
import warnings

import torch
import torch.nn as nn
from torch.nn import functional as F

from npf.utils.helpers import (
    channels_to_2nd_dim,
    channels_to_last_dim,
    make_depth_sep_conv,
)
from npf.utils.initialization import init_param_, weights_init

__all__ = [
    "GaussianConv2d",
    "ConvBlock",
    "ResNormalizedConvBlock",
    "ResConvBlock",
    "CNN",
    "UnetCNN",
]


class GaussianConv2d(nn.Module):
    def __init__(self, kernel_size=5, **kwargs):
        super().__init__()
        self.kwargs = kwargs
        assert kernel_size % 2 == 1
        self.kernel_sizes = (kernel_size, kernel_size)
        self.exponent = -(
            (torch.arange(0, kernel_size).view(-1, 1).float() - kernel_size // 2) ** 2
        )
        self.reset_parameters()

    def reset_parameters(self):
        self.weights_x = nn.Parameter(torch.tensor([1.0]))
        self.weights_y = nn.Parameter(torch.tensor([1.0]))

    def forward(self, X):
        # only switch first time to device
        self.exponent = self.exponent.to(X.device)

        marginal_x = torch.softmax(self.exponent * self.weights_x, dim=0)
        marginal_y = torch.softmax(self.exponent * self.weights_y, dim=0).T

        in_chan = X.size(1)
        filters = marginal_x @ marginal_y
        filters = filters.view(1, 1, *self.kernel_sizes).expand(
            in_chan, 1, *self.kernel_sizes
        )

        return F.conv2d(X, filters, groups=in_chan, **self.kwargs)


class ConvBlock(nn.Module):
    """Simple convolutional block with a single layer.

    Parameters
    ----------
    in_chan : int
        Number of input channels.

    out_chan : int
        Number of output channels.

    Conv : nn.Module
        Convolutional layer (uninitialized). E.g. `nn.Conv1d`.

    kernel_size : int or tuple, optional
        Size of the convolving kernel.

    dilation : int or tuple, optional
        Spacing between kernel elements.

    activation: callable, optional
        Activation object. E.g. `nn.ReLU`.

    Normalization : nn.Module, optional
        Normalization layer (uninitialized). E.g. `nn.BatchNorm1d`.

    kwargs :
        Additional arguments to `Conv`.

    References
    ----------
    [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2016, October). Identity
        mappings in deep residual networks. In European conference on
        computer vision (pp. 630-645). Springer, Cham.

    [2] <NAME>. (2017). Xception: Deep learning with depthwise separable
        convolutions. In Proceedings of the IEEE conference on computer
        vision and pattern recognition (pp. 1251-1258).
    """

    def __init__(
        self,
        in_chan,
        out_chan,
        Conv,
        kernel_size=5,
        dilation=1,
        activation=nn.ReLU(),
        Normalization=nn.Identity,
        **kwargs
    ):
        super().__init__()
        self.activation = activation

        padding = kernel_size // 2

        Conv = make_depth_sep_conv(Conv)
        self.conv = Conv(in_chan, out_chan, kernel_size, padding=padding, **kwargs)
        self.norm = Normalization(in_chan)

        self.reset_parameters()

    def reset_parameters(self):
        weights_init(self)

    def forward(self, X):
        return self.conv(self.activation(self.norm(X)))


class ResConvBlock(nn.Module):
    """Convolutional block inspired by the pre-activation Resnet [1]
    and depthwise separable convolutions [2].

    Parameters
    ----------
    in_chan : int
        Number of input channels.

    out_chan : int
        Number of output channels.

    Conv : nn.Module
        Convolutional layer (uninitialized). E.g. `nn.Conv1d`.

    kernel_size : int or tuple, optional
        Size of the convolving kernel. Should be odd to keep the same size.

    activation: callable, optional
        Activation object. E.g. `nn.ReLU()`.

    Normalization : nn.Module, optional
        Normalization layer (uninitialized). E.g. `nn.BatchNorm1d`.

    n_conv_layers : int, optional
        Number of convolutional layers, can be 1 or 2.

    is_bias : bool, optional
        Whether to use a bias.

    References
    ----------
    [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2016, October). Identity
        mappings in deep residual networks.
In European conference on computer vision (pp. 630-645). Springer, Cham. [2] <NAME>. (2017). Xception: Deep learning with depthwise separable convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1251-1258). """ def __init__( self, in_chan, out_chan, Conv, kernel_size=5, activation=nn.ReLU(), Normalization=nn.Identity, is_bias=True, n_conv_layers=1, ): super().__init__() self.activation = activation self.n_conv_layers = n_conv_layers assert self.n_conv_layers in [1, 2] if kernel_size % 2 == 0: raise ValueError("`kernel_size={}`, but should be odd.".format(kernel_size)) padding = kernel_size // 2 if self.n_conv_layers == 2: self.norm1 = Normalization(in_chan) self.conv1 = make_depth_sep_conv(Conv)( in_chan, in_chan, kernel_size, padding=padding, bias=is_bias ) self.norm2 = Normalization(in_chan) self.conv2_depthwise = Conv( in_chan, in_chan, kernel_size, padding=padding, groups=in_chan, bias=is_bias ) self.conv2_pointwise = Conv(in_chan, out_chan, 1, bias=is_bias) self.reset_parameters() def reset_parameters(self): weights_init(self) def forward(self, X): if self.n_conv_layers == 2: out = self.conv1(self.activation(self.norm1(X))) else: out = X out = self.conv2_depthwise(self.activation(self.norm2(out))) # add the residual before the pointwise conv => the pointwise conv can change the number of channels out = out + X out = self.conv2_pointwise(out.contiguous()) # .contiguous() guards against non-contiguous strides after the residual add return out class ResNormalizedConvBlock(ResConvBlock): """Modification of `ResConvBlock` to use normalized convolutions [1]. Parameters ---------- in_chan : int Number of input channels. out_chan : int Number of output channels. Conv : nn.Module Convolutional layer (uninitialized). E.g. `nn.Conv1d`. kernel_size : int or tuple, optional Size of the convolving kernel. Should be odd to keep the same size. activation: nn.Module, optional Activation object. E.g. `nn.ReLU()`. is_bias : bool, optional Whether to use a bias. References ---------- [1] <NAME>., & <NAME>. (1993, June). Normalized and differential convolution. In Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (pp. 515-523). IEEE. """ def __init__( self, in_chan, out_chan, Conv, kernel_size=5, activation=nn.ReLU(), is_bias=True, **kwargs ): super().__init__( in_chan, out_chan, Conv, kernel_size=kernel_size, activation=activation, is_bias=is_bias, Normalization=nn.Identity, **kwargs ) # make sure no normalization def reset_parameters(self): weights_init(self) self.bias = nn.Parameter(torch.tensor([0.0])) self.temperature = nn.Parameter(torch.tensor([0.0])) init_param_(self.temperature) def forward(self, X): """ Apply a normalized convolution. X should contain 2*in_chan channels: the first half holds the signal, the second half the corresponding confidence channels. """ signal, conf_1 = X.chunk(2, dim=1) # make sure confidence is in [0, 1] (it might not be, due to the pointwise transform) conf_1 = conf_1.clamp(min=0, max=1) X = signal * conf_1 numerator = self.conv1(self.activation(X)) numerator = self.conv2_depthwise(self.activation(numerator)) density = self.conv2_depthwise(self.conv1(conf_1)) out = numerator / torch.clamp(density, min=1e-5) # add the residual before the pointwise conv => the pointwise conv can change the number of channels # make sure that confidence cannot decrease and cannot be greater than 1 conf_2 = conf_1 + torch.sigmoid( density * F.softplus(self.temperature) + self.bias ) conf_2 = conf_2.clamp(max=1) out = out + X out = self.conv2_pointwise(out) conf_2 = self.conv2_pointwise(conf_2) return torch.cat([out, conf_2], dim=1) class CNN(nn.Module): """Simple multilayer CNN. Parameters ---------- n_channels : int or list Number of channels, same for input and output. If a list, it needs to be of size `n_blocks + 1`, e.g. [16, 32, 64] means that you will have `[ConvBlock(16, 32), ConvBlock(32, 64)]`. ConvBlock : nn.Module Convolutional block (uninitialized). Will be instantiated as `ConvBlock(in_chan, out_chan)`. n_blocks : int, optional Number of convolutional blocks. is_chan_last : bool, optional Whether the channels are on the last dimension of the input. kwargs : Additional arguments to `ConvBlock`. """ def __init__(self, n_channels, ConvBlock, n_blocks=3, is_chan_last=False, **kwargs): super().__init__() self.n_blocks = n_blocks self.is_chan_last = is_chan_last self.in_out_channels = self._get_in_out_channels(n_channels, n_blocks) self.conv_blocks = nn.ModuleList( [ ConvBlock(in_chan, out_chan, **kwargs) for in_chan, out_chan in self.in_out_channels ] ) self.is_return_rep = False # never return representation for vanilla conv self.reset_parameters() def reset_parameters(self): weights_init(self) def _get_in_out_channels(self, n_channels, n_blocks): """Return a list of tuples of input and output channels.""" if isinstance(n_channels, int): channel_list = [n_channels] * (n_blocks + 1) else: channel_list = list(n_channels) assert len(channel_list) == (n_blocks + 1), "{} != {}".format( len(channel_list), n_blocks + 1 ) return list(zip(channel_list, channel_list[1:])) def forward(self, X): if self.is_chan_last: X = channels_to_2nd_dim(X) X, representation = self.apply_convs(X) if self.is_chan_last: X = channels_to_last_dim(X) if self.is_return_rep: return X, representation return X def apply_convs(self, X): for conv_block in self.conv_blocks: X = conv_block(X) return X, None class UnetCNN(CNN): """Unet [1]. Parameters ---------- n_channels : int or list Number of channels, same for input and output. If a list, it needs to be of size `n_blocks + 1`, e.g. [16, 32, 64] means that you will have `[ConvBlock(16, 32), ConvBlock(32, 64)]`. ConvBlock : nn.Module Convolutional block (uninitialized). Will be instantiated as `ConvBlock(in_chan, out_chan)`. Pool : nn.Module Pooling layer (uninitialized). E.g. torch.nn.MaxPool1d. upsample_mode : {'nearest', 'linear', 'bilinear', 'bicubic', 'trilinear'} The upsampling algorithm: nearest, linear (1D-only), bilinear, bicubic (2D-only), trilinear (3D-only). max_nchannels : int, optional Bounds the maximum number of channels instead of always doubling them at each downsampling block. pooling_size : int or tuple, optional Size of the pooling filter. is_force_same_bottleneck : bool, optional Whether to use the average bottleneck for the same functions sampled at different context and target. If `True` the first and second halves of a batch should contain different samples of the same functions (in order). is_return_rep : bool, optional Whether to return a summary representation that corresponds to the bottleneck + global mean pooling. kwargs : Additional arguments to `CNN` and `ConvBlock`. References ---------- [1] Ronneberger, Olaf, <NAME>, and <NAME>. "U-net: Convolutional networks for biomedical image segmentation." International Conference on Medical image computing and computer-assisted intervention. Springer, Cham, 2015. """ def __init__( self, n_channels, ConvBlock, Pool, upsample_mode, max_nchannels=256, pooling_size=2, is_force_same_bottleneck=False, is_return_rep=False, **kwargs ): self.max_nchannels = max_nchannels super().__init__(n_channels, ConvBlock, **kwargs) self.pooling_size = pooling_size self.pooling = Pool(self.pooling_size) self.upsample_mode = upsample_mode self.is_force_same_bottleneck = is_force_same_bottleneck self.is_return_rep = is_return_rep def apply_convs(self, X): n_down_blocks = self.n_blocks
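# --- Illustrative usage (added sketch; not part of the original source). ---
# It assumes the helpers this module already references (weights_init,
# make_depth_sep_conv, ...) are importable, and only demonstrates the channel
# bookkeeping: n_channels=[16, 32, 64] with n_blocks=2 builds
# [ResConvBlock(16, 32), ResConvBlock(32, 64)].
def _demo_cnn_shapes():
    import torch
    import torch.nn as nn

    cnn = CNN([16, 32, 64], ResConvBlock, n_blocks=2, Conv=nn.Conv1d, kernel_size=5)
    x = torch.randn(8, 16, 128)  # (batch, channels, length)
    out = cnn(x)                 # odd kernel + padding preserve length -> (8, 64, 128)
    return out.shape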
# 6.0001 Spring 2020 # Problem Set 3 # Written by: sylvant, muneezap, charz, anabell, nhung, wang19k, asinelni, shahul, jcsands # Problem Set 3 # Name: <NAME> # Collaborators: <NAME> # Time Spent: 4:00 # Late Days Used: (only if you are using any) import string # - - - - - - - - - - # Check for similarity by comparing two texts to see how similar they are to each other ### Problem 1: Prep Data ### # Make a *small* change to separate the data by whitespace rather than just tabs def load_file(filename): """ Args: filename: string, name of file to read Returns: list of strings holding the file contents where each string was separated by an empty space in the file """ inFile = open(filename, 'r') line = inFile.read() inFile.close() line = line.strip().lower() for char in string.punctuation: line = line.replace(char, "") # remove punctuation (changed from replacing "\t") return line.split() ### Problem 2: Find Ngrams ### def find_ngrams(single_words, n): """ Args: single_words: list of words in the text, in the order they appear in the text all words are made of lowercase characters n: length of 'n-gram' window Returns: list of n-grams from input text list, or an empty list if n is not a valid value """ ngrams = [] if n <= 0 or n > len(single_words): return ngrams # returns an empty list elif n == 1: return single_words # returns the original input else: for i in range(len(single_words)): if n + i > len(single_words): break # done so that the very last word of the last n-gram is the very last word of the list single_words else: mini_list = single_words[i:n+i] # the list contains the words between index i and n+i, so the maximum possible value of n+i is the length of single_words ngrams_word = ' '.join([str(item) for item in mini_list]) # the list is transformed into a string with single spaces in between and no spaces at the beginning or end ngrams.append(ngrams_word) # the n-gram string is added to the list return ngrams ### Problem 3: Word Frequency ### def compute_frequencies(words): """ Args: words: list of words (or n-grams), all are made of lowercase characters Returns: dictionary that maps string:int where each string is a word (or n-gram) in words and the corresponding int is the frequency of the word (or n-gram) in words """ frequency_dict = {} for i in words: if i in frequency_dict: frequency_dict[i] += 1 # if the word/n-gram is already in the dictionary, its frequency increases by one every time it appears in the list of words/n-grams else: frequency_dict[i] = 1 # if the word isn't in the dictionary, its frequency is set to one; if it appears again it will go through the previous conditional return frequency_dict ### Problem 4: Similarity ### def get_similarity_score(dict1, dict2, dissimilarity = False): """ The keys of dict1 and dict2 are all lowercase, you will NOT need to worry about case sensitivity. Args: dict1: frequency dictionary of words or n-grams for one text dict2: frequency dictionary of words or n-grams for another text dissimilarity: Boolean, optional parameter. Defaults to False. If this is True, return the dissimilarity score, 100*(DIFF/ALL), instead. Returns: int, a percentage between 0 and 100, inclusive representing how similar the texts are to each other The difference in text frequencies = DIFF sums words from these three scenarios: * If a word or n-gram occurs in dict1 and dict2 then get the difference in frequencies * If a word or n-gram occurs only in dict1 then take the frequency from dict1 * If a word or n-gram occurs only in dict2 then take the frequency from dict2 The total frequencies = ALL is calculated by summing all frequencies in both dict1 and dict2. Return 100*(1-(DIFF/ALL)) rounded to the nearest whole number if dissimilarity is False, otherwise return 100*(DIFF/ALL) """ DIFF = 0 for i in dict1: x = False # Boolean used so that frequencies are not added twice (see below) for j in dict2: if i == j: # == is used instead of `i in j` because, for example, the word "meme" is contained in "memes" and would therefore cause a problem DIFF += abs(dict1[i] - dict2[j]) # if the word/n-gram appears in both dictionaries, the absolute difference between its frequencies is added to DIFF x = True if not x: # the word/n-gram only occurs in dict1, so its frequency is added to DIFF as-is DIFF += dict1[i] for j in dict2: x = False # same use of the Boolean, for the same reason as in the previous for loop for i in dict1: if i == j: # == is used for the same reason x = True # the absolute difference does not have to be added this time, since it already has been if not x: DIFF += dict2[j] ALL = 0 for i in dict1: ALL += dict1[i] # all the frequencies of the first dictionary are added to ALL for j in dict2: ALL += dict2[j] # the same, but for the second dictionary # depending on the dissimilarity flag, one of the following is returned if dissimilarity == False: result = round(100*(1 - (DIFF/ALL))) # similarity between the dictionaries of words/n-grams else: result = round(100*(DIFF/ALL)) # dissimilarity between the dictionaries of words/n-grams return result ### Problem 5: Most Frequent Word(s) ### def compute_most_frequent(dict1, dict2): """ The keys of dict1 and dict2 are all lowercase, you will NOT need to worry about case sensitivity. Args: dict1: frequency dictionary for one text dict2: frequency dictionary for another text Returns: list of the most frequent word(s) in the input dictionaries The most frequent word: * is based on the combined word frequencies across both dictionaries. If a word occurs in both dictionaries, consider the sum of the frequencies as the combined word frequency. * need not be in both dictionaries, i.e. it can be exclusively in dict1, dict2, or shared by dict1 and dict2. If multiple words are tied (i.e. share the same highest frequency), return an alphabetically ordered list of all these words. """ # combine the frequencies across both dictionaries, so that a word occurring # in both is judged by the *sum* of its frequencies, as the spec requires combined = {} for word in dict1: combined[word] = combined.get(word, 0) + dict1[word] for word in dict2: combined[word] = combined.get(word, 0) + dict2[word] highest = max(combined.values()) most_freq = [word for word in combined if combined[word] == highest] return sorted(most_freq) # sorted() because the specification establishes that the list has to be alphabetically ordered ### Problem 6: Finding closest artist ### def find_closest_artist(artist_to_songfiles, mystery_lyrics, ngrams = 1): """ Args: artist_to_songfiles: dictionary that maps string:list of strings where each string key is an artist name and the corresponding list is a list of filenames (including the extension), each holding lyrics to a song by that artist mystery_lyrics: list of single word strings Can be more than one or two words (can also be an empty list) assume each string is made of lowercase characters ngrams: int, optional parameter. Defaults to 1. If it is greater than 1, n-grams of text in files and n-grams of mystery_lyrics should be used in analysis, with n set to the value of the parameter ngrams Returns: list of artists (in alphabetical order) that best match the mystery lyrics (i.e. list of artists that share the highest average similarity score (to the nearest whole number)) The best match is defined as the artist(s) whose songs have the highest average similarity score (after rounding) with the
l weights_fname = spec.pop('weights_fname', None) if type(spec['ordinal_bins']) is not int: spec['ordinal_bins'] = [int(i) for i in spec['ordinal_bins'].split('_')[1:][::2]] #print(weights_fname) assert weights_fname is not None, "Provide a valid weights filename to load model." model = MordredStrategy(**spec) model.set_weights(weights_fname) return model @property def seed_length(self): return self.lookback + 1 class MordredXStrategy(ModelStrategy): required_spec_keys = ['n_ar_channels', 'n_exog_channels', 'units', 'dropout_rate', 'lam', 'horizon', 'lookback'] id = 'mordredX' def __init__(self, ar_ordinal_bins=85, exog_ordinal_bins=85, units=64, dropout_rate=0.25, lam=1e-9, lookback=100, horizon=100, n_ar_channels=1, n_exog_channels=1, custom_objs=[]): # type: (int, int, float, float, int, int, int, list) -> None self.n_ar_bins = ar_ordinal_bins self.n_exog_bins = exog_ordinal_bins self.n_hidden = units self.dropout_rate = dropout_rate self.lam = lam self.lookback = lookback self.horizon = horizon self.n_ar_channels = n_ar_channels self.n_exog_channels = n_exog_channels #self.filename = 'mordredx_{}_bins_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_ARchannels_{}_EXchannels_{}'\ # .format(self.n_bins, self.n_hidden, self.dropout_rate, self.lam, # self.lookback, self.horizon, self.n_ar_channels, self.n_exog_channels) loss = 'categorical_crossentropy' custom_objs = custom_objs lstm_spec = {'units': self.n_hidden, 'return_state': True, 'kernel_regularizer': l2(self.lam), 'recurrent_regularizer': l2(self.lam), 'dropout': self.dropout_rate, 'recurrent_dropout': self.dropout_rate} dense_spec = {'activation': 'softmax', 'kernel_regularizer': l2(self.lam)} infr_init_h = Input(shape=(self.n_hidden,)) infr_init_C = Input(shape=(self.n_hidden,)) all_encoder_inputs = [Input(shape=(None, self.n_ar_bins[i]), name='encoder_channel_{}'.format(i + 1)) for i in range(self.n_ar_channels)] all_exog_encoder_inputs = [Input(shape=(None, self.n_exog_bins[i]), name='exog_encoder_channel_{}'.format(i + 1)) for i in range(self.n_exog_channels)] all_decoder_inputs = [Input(shape=(None, self.n_ar_bins[i]), name='decoder_channel_{}'.format(i + 1)) for i in range(self.n_ar_channels)] all_exog_decoder_inputs = [Input(shape=(None, self.n_exog_bins[i]), name='exog_decoder_channel_{}'.format(i + 1)) for i in range(self.n_exog_channels)] encoder_input = Concatenate(axis=-1)(all_encoder_inputs + all_exog_encoder_inputs) decoder_input = Concatenate(axis=-1)(all_decoder_inputs + all_exog_decoder_inputs) train_inputs = all_encoder_inputs + all_exog_encoder_inputs + all_decoder_inputs + all_exog_decoder_inputs encoder_predict_inputs = all_encoder_inputs + all_exog_encoder_inputs + [K.learning_phase()] decoder_predict_inputs = all_decoder_inputs + all_exog_decoder_inputs + [infr_init_h, infr_init_C, K.learning_phase()] encoder_fwd = LSTM(**lstm_spec) lstm_spec['go_backwards'] = True encoder_bkwd = LSTM(**lstm_spec) _, h_fwd, C_fwd = encoder_fwd(encoder_input) _, h_bkwd, C_bkwd = encoder_bkwd(encoder_input) decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])] lstm_spec['return_sequences'] = True lstm_spec['go_backwards'] = False decoder_lstm = LSTM(**lstm_spec) decoder_output, _, _ = decoder_lstm(decoder_input, initial_state=decoder_initial_states) infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_input, initial_state=[infr_init_h, infr_init_C]) if self.dropout_rate > 0.: decoder_output = Dropout(self.dropout_rate)(decoder_output) infr_decoder_output = 
Dropout(self.dropout_rate)(infr_decoder_output) train_outputs = [] decoder_predict_outputs = [] for i in range(self.n_ar_channels): dense_spec['units'] = self.n_ar_bins[i] decoder_dense = Dense(**dense_spec) train_outputs += [decoder_dense(decoder_output)] decoder_predict_outputs += [decoder_dense(infr_decoder_output)] decoder_predict_outputs += [infr_h, infr_C] self.__sequence2sequence = Model(train_inputs, train_outputs) self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs) self.__encoder = Model(encoder_predict_inputs[:-1], decoder_initial_states) self.__decoder = Model(decoder_predict_inputs[:-1], decoder_predict_outputs) self.predict_stochastic = K.function(train_inputs + [K.learning_phase()], train_outputs) self.predict_stochastic_encoder = K.function(encoder_predict_inputs, decoder_initial_states) self.predict_stochastic_decoder = K.function(decoder_predict_inputs, decoder_predict_outputs) def fit(self, train_frames, **kwargs): # IMPORTANT: asssumes train_frames is a nparray which technically # does not allow for channels with different number of bins # output channels come before exogenous channels batch_size = kwargs.get('batch_size', 256) val_p = kwargs.get('validation_split', 0.15) epochs = kwargs.get('epochs', 50) def get_inputs(x_list): return [x[:, :self.lookback] for x in x_list] + \ [x[:, self.lookback:self.lookback + self.horizon] for x in x_list] def get_outputs(x_list, n_ar=1): return [x[:, self.lookback + 1:self.lookback + self.horizon + 1] for x in x_list[:n_ar]] train_gen, val_gen, tr_steps, val_steps = train_frames(get_inputs=get_inputs, get_outputs=partial(get_outputs, n_ar=self.n_ar_channels), batch_size=batch_size, val_p=val_p) cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)])) callbacks = [EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=1, mode='min'), ModelCheckpoint(cp_fname, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True)] self.__sequence2sequence.fit_generator(train_gen, steps_per_epoch=tr_steps, verbose=2, validation_data=val_gen, validation_steps=val_steps, callbacks=callbacks, epochs=epochs) self.__sequence2sequence.load_weights(cp_fname) os.remove(cp_fname) def predict(self, ar_input_list, exog_input_list=[], predictive_horizon=100, mc_samples=100): #exog_input_list[i] must have at least lookback + predictive_horizon samples exog_min_length = self.lookback + predictive_horizon for i_exog, exog_input in enumerate(exog_input_list): assert exog_input.shape[1] >= exog_min_length, '{} exog input has {} < {} samples'.format(i_exog, exog_input.shape[1], exog_min_length) samples = [[] for _ in range(self.n_ar_channels)] encoder_inputs = [inputs[:, :self.lookback, :] for inputs in ar_input_list + exog_input_list] first_decoder_seed = [inputs[:, self.lookback:self.lookback+1, :] for inputs in ar_input_list + exog_input_list] for i_s in range(mc_samples): h, c = self.predict_stochastic_encoder(encoder_inputs + [True]) decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True]) seq = [decoder_stochastic_output[:-2]] # length is number of AR channels for t in range(1, predictive_horizon): current_exog_input = [inputs[:, self.lookback+t:self.lookback+t+1, :] for inputs in exog_input_list] decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output[:-2] + current_exog_input + decoder_stochastic_output[-2:] + [True]) seq += [decoder_stochastic_output[:-2]] for i_ch in 
range(self.n_ar_channels): samples[i_ch] += [np.stack([s[i_ch] for s in seq], axis=-1).T.squeeze()] posterior_mean = [np.stack(i_samples).mean(axis=0).squeeze() for i_samples in samples] drawn_samples = [] for i_ch in range(self.n_ar_channels): ch_posterior = posterior_mean[i_ch] ch_samples = [np.random.choice(self.n_ar_bins[i_ch], mc_samples, p=ch_posterior[t]) for t in range(predictive_horizon)] drawn_samples += [np.stack(ch_samples, axis=-1)] return {'ordinal_pdf': posterior_mean, 'draws': drawn_samples} def set_weights(self, weights_fname): self.__sequence2sequence.load_weights(weights_fname) @staticmethod def load(fname, custom_objs = None): with open(fname, 'rb') as f: spec = pickle.load(f) if custom_objs is not None: spec['custom_objs'] = custom_objs if 'lambda' in spec: l = spec.pop('lambda', 0.) spec['lam'] = l weights_fname = spec.pop('weights_fname', None) assert weights_fname is not None, "Provide a valid weights filename to load model." model = MordredXStrategy(**spec) model.set_weights(weights_fname) return model def save(self, folder, fname=None): save_obj = {'units': self.n_hidden, 'dropout_rate': self.dropout_rate, 'lam': self.lam, 'lookback': self.lookback, 'horizon': self.horizon, 'n_ar_channels': self.n_ar_channels, 'n_exog_channels': self.n_exog_channels, 'ar_ordinal_bins': self.n_ar_bins, 'exog_ordinal_bins': self.n_exog_bins} if fname is None: fname = MordredXStrategy.get_filename(save_obj, folder) fname = folder + fname weights_fname = fname + '_weights.h5' save_obj['weights_fname'] = weights_fname self.__sequence2sequence.save_weights(weights_fname, overwrite=True) with open(fname, 'wb') as f: pickle.dump(save_obj, f) def get_spec(self): return {'units': self.n_hidden, 'dropout_rate': self.dropout_rate, 'lam': self.lam, 'lookback': self.lookback, 'horizon': self.horizon, 'n_ar_channels': self.n_ar_channels, 'n_exog_channels': self.n_exog_channels, 'ar_ordinal_bins': self.n_ar_bins, 'exog_ordinal_bins': self.n_exog_bins} @staticmethod def get_filename(model_spec, folder='.'): assert all([k in model_spec for k in MordredXStrategy.required_spec_keys]) fname = 'mordredx_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}_exog_{}'.format( model_spec['units'], model_spec['dropout_rate'], model_spec['lam'], model_spec['lookback'], model_spec['horizon'], model_spec['n_ar_channels'], model_spec['n_exog_channels']) return fname[:MAX_FNAME_LENGTH] @property def seed_length(self): return self.lookback + 1 class SARIMAXStrategy(ModelStrategy): filename = '' id = 'sarimax' def __init__(self, order, seasonal_order=(0,0,0,0)): self.order = order self.seasonal_order = seasonal_order def fit(self, train_frames, **kwargs): self.model = sm.tsa.statespace.SARIMAX(train_frames, order=self.order, seasonal_order=self.seasonal_order, enforce_stationarity=False) self.fit_res = self.model.fit(disp=False) def predict(self, inputs, predictive_horizon=100, **kwargs): pred = self.fit_res.get_forecast(steps=predictive_horizon) return {'posterior_mean': pred.predicted_mean, 'posterior_std': np.sqrt(pred.var_pred_mean)} @staticmethod def load(fname, **kwargs): this = None with open(fname, 'rb') as f: # binary mode: the file was written with pickle.dump this = pickle.load(f) return this def save(self, folder): params = {'order': self.order, 'seasonal_order': self.seasonal_order} with open(folder + SARIMAXStrategy.get_filename(params), 'wb') as f: pickle.dump(self, f) @staticmethod def get_filename(params): # type: (dict) -> str return 'sarimax_{}_{}'.format(params['order'][0], params['seasonal_order'][0]) @property def seed_length(self):
return 121 class ContinuousSeq2Seq(ModelStrategy): """Implements the ordinal sequence-to-sequence time series forecasting strategy.""" required_spec_keys = ['units', 'dropout_rate', 'lam', 'horizon', 'lookback'] id = 'seq2seq' def __init__(self, units=64, dropout_rate=0.25, lam=1e-9, lookback=100, horizon=100, n_channels=1, custom_objs=[]): # type: (int, float, float, int, int, int, list) -> None self.n_hidden = units self.dropout_rate = dropout_rate self.lam = lam self.lookback = lookback self.horizon = horizon self.n_channels = n_channels self.filename = 'contseq2seq_{}_hidden_{}_dropout_{}_l2_lookback_{}_horizon_{}_channels_{}'.format( self.n_hidden, self.dropout_rate, self.lam, self.lookback, self.horizon, self.n_channels) loss = 'mse' custom_objs = custom_objs lstm_spec = {'units': self.n_hidden, 'return_state': True, 'kernel_regularizer': l2(self.lam), 'recurrent_regularizer': l2(self.lam), 'dropout': self.dropout_rate, 'recurrent_dropout': self.dropout_rate} dense_spec = {'units': self.n_channels, 'activation': 'linear', 'kernel_regularizer': l2(self.lam)} infr_init_h = Input(shape=(self.n_hidden,)) infr_init_C = Input(shape=(self.n_hidden,)) encoder_input = Input(shape=(None, self.n_channels)) decoder_input = Input(shape=(None, self.n_channels)) train_inputs = [encoder_input, decoder_input] encoder_predict_inputs = [encoder_input, K.learning_phase()] decoder_predict_inputs = [decoder_input, infr_init_h, infr_init_C, K.learning_phase()] encoder_fwd = LSTM(**lstm_spec) lstm_spec['go_backwards'] = True encoder_bkwd = LSTM(**lstm_spec) _, h_fwd, C_fwd = encoder_fwd(encoder_input) _, h_bkwd, C_bkwd = encoder_bkwd(encoder_input) decoder_initial_states = [Average()([h_fwd, h_bkwd]), Average()([C_fwd, C_bkwd])] lstm_spec['return_sequences'] = True lstm_spec['go_backwards'] = False decoder_lstm = LSTM(**lstm_spec) decoder_output, _, _ = decoder_lstm(decoder_input, initial_state=decoder_initial_states) infr_decoder_output, infr_h, infr_C = decoder_lstm(decoder_input, initial_state=[infr_init_h, infr_init_C]) if self.dropout_rate > 0.: decoder_output = Dropout(self.dropout_rate)(decoder_output) infr_decoder_output = Dropout(self.dropout_rate)(infr_decoder_output) decoder_dense = Dense(**dense_spec) decoded_sequence = decoder_dense(decoder_output) train_outputs = [decoded_sequence] inferred_sequence = decoder_dense(infr_decoder_output) decoder_predict_outputs = [inferred_sequence, infr_h, infr_C] self.__sequence2sequence = Model(train_inputs, train_outputs) self.__sequence2sequence.compile(optimizer='nadam', loss=loss, metrics=[loss] + custom_objs) self.__encoder = Model(encoder_predict_inputs[:-1], decoder_initial_states) self.__decoder = Model(decoder_predict_inputs[:-1], decoder_predict_outputs) self.predict_stochastic = K.function(train_inputs + [K.learning_phase()], train_outputs) self.predict_stochastic_encoder = K.function(encoder_predict_inputs, decoder_initial_states) self.predict_stochastic_decoder = K.function(decoder_predict_inputs, decoder_predict_outputs) def fit(self, train_frames, **kwargs): cp_fname = 'cp_{}'.format(''.join([random.choice('0123456789ABCDEF') for _ in range(16)])) callbacks = [EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=2, verbose=1, mode='min'), ModelCheckpoint(cp_fname, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True)] inputs = [train_frames[:, :self.lookback], train_frames[:, self.lookback:self.lookback + self.horizon]] outputs = [train_frames[:, self.lookback + 1:self.lookback + self.horizon + 1]] 
self.__sequence2sequence.fit(inputs, outputs, verbose=2, callbacks=callbacks, **kwargs) self.__sequence2sequence.load_weights(cp_fname) os.remove(cp_fname) def predict(self, inputs, predictive_horizon=100, mc_samples=100): samples = [] encoder_inputs = [inputs[:, :self.lookback]] first_decoder_seed = [inputs[:, self.lookback:self.lookback + 1]] for i_s in range(mc_samples): h, c = self.predict_stochastic_encoder(encoder_inputs + [True]) decoder_stochastic_output = self.predict_stochastic_decoder(first_decoder_seed + [h, c, True]) seq = [decoder_stochastic_output[:-2]] for t in range(predictive_horizon-1): decoder_stochastic_output = self.predict_stochastic_decoder(decoder_stochastic_output + [True]) seq += [decoder_stochastic_output[:-2]] samples += [np.stack(seq, axis=-1).T.squeeze()] return {'draws': np.stack(samples)} def save(self, folder, fname=None): save_obj = {'units': self.n_hidden, 'dropout_rate': self.dropout_rate, 'lam': self.lam, 'lookback': self.lookback, 'horizon': self.horizon, 'n_channels':self.n_channels} if fname is None: fname = ContinuousSeq2Seq.get_filename(save_obj) fname = folder + fname weights_fname = fname + '_weights.h5' save_obj['weights_fname'] = weights_fname self.__sequence2sequence.save_weights(weights_fname, overwrite=True) with open(fname, 'wb') as f: pickle.dump(save_obj, f) def set_weights(self, weights_fname): self.__sequence2sequence.load_weights(weights_fname) @staticmethod def get_filename(model_spec): assert all([k in model_spec for k
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __all__ = [ "ScenarioGenerator", "ScenarioScheduler", "ConfigurableScenarioGenerator", "ScAlbertU1", "ScAlbertU2", "DistributionScenarioGenerator", "PoissonDistScenarioGenerator", "NormalDistScenarioGenerator", "TreeOnTrackScenarioGenerator" ] import itertools, numpy, functools from robtim import Dataset, LinTimCSVWriter class ScenarioGenerator: """ Abstract super class of all scenario generators. A scenario generator models a set of delay scenarios. A scenario generator uses a python generator, implemented in :func:`scenarios`, to yield delay scenarios. Be aware that a scenario generator may yield infinitely many delay scenarios. When using :class:`RobustnessEvaluator` you have to take care of the termination yourself. Use for example :class:`ScenarioScheduler`. """ def initialize(self, dataset : Dataset): """ Initializes the generator. This is where configuration changes for LinTim components should be applied. :param dataset: dataset which delays should be generated on """ pass def scenarios(self, dataset : Dataset): """ This function has to be a python generator which generates a new delay scenario and yields a dict of its properties as long as more delays are available. The dict of properties should correspond to the parameters of the LinTim component dm-delays. :param dataset: dataset which delays should be generated on """ pass def reset(self, dataset : Dataset): """ Deletes all generated delays from the dataset. This affects the following files in the dataset's directory: - delay-management/Delays-Activities.giv - delay-management/Delays-Events.giv :param dataset: dataset which delays should be generated on """ dataset.delete("delay-management/Delays-Activities.giv") dataset.delete("delay-management/Delays-Events.giv") class ScenarioScheduler(ScenarioGenerator): """ Schedules different scenario generators for successive execution. In this way it ensures that the evaluation terminates. :example: ScenarioScheduler(ScAlbertU1(4), iterations=[5]) models the scenario set consisting of 5 scenarios generated by ScAlbertU1 with parameter 4. :param args: one or several scenario generators :param iterations: list of integers. The i-th value states how many scenarios from the i-th scenario generator in args should be taken """ def __init__(self, *args, iterations : list = []): self.generators = args self.iterations = iterations def initialize(self, dataset : Dataset): pass def scenarios(self, dataset : Dataset): for sc, it in zip(self.generators, itertools.chain(self.iterations, itertools.repeat(None))): sc.initialize(dataset) for scenario in itertools.islice(sc.scenarios(dataset), it): yield scenario class ConfigurableScenarioGenerator(ScenarioGenerator): """ Generates delay scenarios using LinTim's dm-delays component. The given configuration is used. Be aware that this generator will generate infinitely many delays. Use :class:`ScenarioScheduler` to guarantee that your evaluation terminates. :param config: configuration for LinTim's dm-delays """ def __init__(self, config): self.config = config def initialize(self, dataset : Dataset): dataset.applyConfig(self.config) def scenarios(self, dataset : Dataset): while True: dataset.make("dm-delays") yield dict(self.config) class ScAlbertU1(ConfigurableScenarioGenerator): """ Scenario generator for the scenario set U_1 defined in the Bachelor's thesis by Albert :param s: max source delay :param smin: (optional) min source delay """ def __init__(self, s, smin = 0): super().__init__({ "delays_generator": "uniform_distribution", "delays_events": False, "delays_activities": True, "delays_append" : False, "delays_absolute_numbers" : False, "delays_min_delay": smin, "delays_max_delay": s, "delays_count_is_absolute" : False, "delays_count" : 100 }) class ScAlbertU2(ConfigurableScenarioGenerator): """ Scenario generator for the scenario set U_2 defined in the Bachelor's thesis by Albert :param s: max source delay :param k: max amount of activities delayed :param smin: (optional) min source delay """ def __init__(self, s, k, smin = 0): super().__init__({ "delays_generator": "uniform_distribution", "delays_events": False, "delays_activities": True, "delays_append" : False, "delays_absolute_numbers" : False, "delays_min_delay": smin, "delays_max_delay": s, "delays_count_is_absolute" : True, "delays_count" : k }) class DistributionScenarioGenerator(ScenarioGenerator): """ Generates delay scenarios with a custom distribution. In addition to the regular fields described in :class:`ScenarioGenerator` this generator yields for every scenario the following values: - `delays_total` sum of all delays on activities and events in seconds - `delays_total_weighted` sum of all delays weighted by the amount of passengers on the activities / events in seconds - `delays_total_passengers` total amount of passengers affected by the delays, i.e. number of passengers on activities / events with delays - `delays_average` = `delays_total` / `delays_total_passengers` - `delays_average_weighted` = `delays_total_weighted` / `delays_total_passengers` :param randomizer: function int -> list[int] which returns a given amount of random delays :param count: number/percentage of events/activities which shall be delayed :param count_abs: whether count is absolute (True) or relative (False) :param events: whether events shall be delayed :param activities: whether activities shall be delayed :param info: info about the distribution for statistical evaluation """ def __init__(self, randomizer, count : int = 10, count_abs : bool = True, events : bool = False, activities : bool = True, info : dict = None): self.randomizer = randomizer self.count = count self.count_abs = count_abs self.events = events self.activities = activities self.info = info def initialize(self, dataset): self.__act_ids = dataset.readCSV("delay-management/Activities-expanded.giv", columns=[0,2,6]) self.__act_ids = list(filter(lambda x: x[1] == "drive", self.__act_ids)) self.__evt_ids = dataset.readCSV("delay-management/Events-expanded.giv", columns=[0,4]) def scenarios(self, dataset : Dataset): while True: self.reset(dataset) totalDelay = 0 totalDelayWeighted = 0 totalPassengers = 0 num_act = self.count if self.activities else 0 num_evt = self.count if self.events else 0 if self.count_abs and self.events and self.activities: num_act = numpy.random.randint(0, self.count) # randint, not rand: the absolute count is split into an integer number of activity delays; the rest go to events num_evt = self.count - num_act if num_act > 0: activities = list(self.__act_ids) if not self.count_abs: num_act = len(activities) * self.count // 100 delays = self.randomizer(num_act)[:num_act] numpy.random.shuffle(activities) with open(dataset.realPath("delay-management/Delays-Activities.giv"), "wt") as f: writer = LinTimCSVWriter(f) writer.write("# RobTim random delays") for (act, delay) in zip(activities, delays): if int(delay) != 0: writer.write([act[0], int(delay)]) totalDelay += int(delay) totalDelayWeighted += int(delay) * act[2] totalPassengers += act[2] if num_evt > 0: events = list(self.__evt_ids) if not self.count_abs: num_evt = len(events) * self.count // 100 delays = self.randomizer(num_evt)[:num_evt] numpy.random.shuffle(events) with open(dataset.realPath("delay-management/Delays-Events.giv"), "wt") as f: writer = LinTimCSVWriter(f) writer.write("# RobTim random delays") for (evt, delay) in zip(events, delays): if int(delay) != 0: writer.write([evt[0], int(delay)]) totalDelay += int(delay) totalDelayWeighted += int(delay) * evt[1] totalPassengers += evt[1] yield { **self.info, "delays_generator" : "robtim_distribution", "randomizer": self.randomizer, "delays_count": self.count, "delays_absolute_numbers": self.count_abs, "delays_events": self.events, "delays_activities": self.activities, "delays_total": totalDelay, "delays_total_weighted": totalDelayWeighted, "delays_total_passengers": totalPassengers, "delays_average": totalDelay / totalPassengers if totalPassengers > 0 else 0, "delays_average_weighted": totalDelayWeighted / totalPassengers if totalPassengers > 0 else 0 } class PoissonDistScenarioGenerator(DistributionScenarioGenerator): """ Generates Poisson-distributed delay scenarios. Formula: stretch * numpy.random.poisson(lam) :param lam: the Poisson distribution's lambda parameter :param stretch: stretch parameter :param count: number/percentage of events/activities which shall be delayed :param count_abs: whether count is absolute (True) or relative (False) :param events: whether events shall be delayed :param activities: whether activities shall be delayed """ def __init__(self, lam : float, stretch : float = 1, count : int = 10, count_abs : bool = True, events : bool = False, activities : bool = True): super().__init__( lambda n : stretch*numpy.random.poisson(lam, n), count = count, count_abs = count_abs, events = events, activities = activities, info = {"distribution" : "poisson", "lambda" : lam, "stretch" : stretch} ) class NormalDistScenarioGenerator(DistributionScenarioGenerator): """ Generates normally distributed delay scenarios. Formula: numpy.random.normal(mean, deviation) :param mean: mean :param deviation: standard deviation :param count: number/percentage of events/activities which shall be delayed :param count_abs: whether count is absolute (True) or relative (False) :param events: whether events shall be delayed :param activities: whether activities shall be delayed """ def __init__(self, mean : float, deviation : float, count : int = 10, count_abs : bool = True, events : bool = False, activities : bool = True): super().__init__( functools.partial(numpy.random.normal, mean, deviation), count = count, count_abs = count_abs, events = events, activities = activities, info = {"distribution" : "normal", "mean" : mean, "deviation" : deviation} ) class TreeOnTrackScenarioGenerator(DistributionScenarioGenerator): """ Generates delay scenarios with a tree-on-track distribution. The number of trees, i.e. the number of activities/events hit, is binomially distributed with parameter binomial_p; the delay each tree causes is exponentially distributed with parameter exp_beta. :param binomial_p: binomial p parameter in [0,1] :param exp_beta: exponential beta parameter :param events: whether events shall be delayed :param activities: whether activities shall be delayed """ def __init__(self, binomial_p : float, exp_beta : float, events : bool = False, activities : bool = True): def randomizer(n) : p = numpy.random.binomial(n, binomial_p) delays = numpy.random.exponential(exp_beta, p) return delays super().__init__( randomizer, count = 100, count_abs = False, events = events,
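# --- Illustrative usage sketch (added; not part of the original module). ---
# How the pieces above compose: ScenarioScheduler bounds the otherwise
# infinite generators. `dataset` is assumed to be a robtim.Dataset that the
# LinTim components can operate on.
def _example_schedule(dataset):
    scheduler = ScenarioScheduler(
        ScAlbertU1(s=300),                                  # 5 uniform scenarios,
        PoissonDistScenarioGenerator(lam=2.0, stretch=60),  # then 10 Poisson ones
        iterations=[5, 10],
    )
    # scenarios() initializes each sub-generator itself and stops after 15 yields
    for scenario in scheduler.scenarios(dataset):
        print(scenario["delays_generator"], scenario.get("delays_total"))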
in the archive. 'root_dir' and 'base_dir' both default to the current directory. Returns the name of the archive file. 'owner' and 'group' are used when creating a tar archive. By default, uses the current owner and group. """ sys.audit("shutil.make_archive", base_name, format, root_dir, base_dir) save_cwd = os.getcwd() if root_dir is not None: if logger is not None: logger.debug("changing into '%s'", root_dir) base_name = os.path.abspath(base_name) if not dry_run: os.chdir(root_dir) if base_dir is None: base_dir = os.curdir kwargs = {'dry_run': dry_run, 'logger': logger} try: format_info = _ARCHIVE_FORMATS[format] except KeyError: raise ValueError("unknown archive format '%s'" % format) from None func = format_info[0] for arg, val in format_info[1]: kwargs[arg] = val if format != 'zip': kwargs['owner'] = owner kwargs['group'] = group try: filename = func(base_name, base_dir, **kwargs) finally: if root_dir is not None: if logger is not None: logger.debug("changing back to '%s'", save_cwd) os.chdir(save_cwd) return filename def get_unpack_formats(): """Returns a list of supported formats for unpacking. Each element of the returned sequence is a tuple (name, extensions, description) """ formats = [(name, info[0], info[3]) for name, info in _UNPACK_FORMATS.items()] formats.sort() return formats def _check_unpack_options(extensions, function, extra_args): """Checks what gets registered as an unpacker.""" # first make sure no other unpacker is registered for this extension existing_extensions = {} for name, info in _UNPACK_FORMATS.items(): for ext in info[0]: existing_extensions[ext] = name for extension in extensions: if extension in existing_extensions: msg = '%s is already registered for "%s"' raise RegistryError(msg % (extension, existing_extensions[extension])) if not callable(function): raise TypeError('The registered function must be a callable') def register_unpack_format(name, extensions, function, extra_args=None, description=''): """Registers an unpack format. `name` is the name of the format. `extensions` is a list of extensions corresponding to the format. `function` is the callable that will be used to unpack archives. The callable will receive archives to unpack. If it's unable to handle an archive, it needs to raise a ReadError exception. If provided, `extra_args` is a sequence of (name, value) tuples that will be passed as arguments to the callable. description can be provided to describe the format, and will be returned by the get_unpack_formats() function. """ if extra_args is None: extra_args = [] _check_unpack_options(extensions, function, extra_args) _UNPACK_FORMATS[name] = extensions, function, extra_args, description def unregister_unpack_format(name): """Removes the pack format from the registry.""" del _UNPACK_FORMATS[name] def _ensure_directory(path): """Ensure that the parent directory of `path` exists""" dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) def _unpack_zipfile(filename, extract_dir): """Unpack zip `filename` to `extract_dir` """ import zipfile # late import for breaking circular dependency if not zipfile.is_zipfile(filename): raise ReadError("%s is not a zip file" % filename) zip = zipfile.ZipFile(filename) try: for info in zip.infolist(): name = info.filename # don't extract absolute paths or ones with .. in them if name.startswith('/') or '..' 
in name: continue targetpath = os.path.join(extract_dir, *name.split('/')) if not targetpath: continue _ensure_directory(targetpath) if not name.endswith('/'): # file with zip.open(name, 'r') as source, \ open(targetpath, 'wb') as target: copyfileobj(source, target) finally: zip.close() def _unpack_tarfile(filename, extract_dir): """Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir` """ import tarfile # late import for breaking circular dependency try: tarobj = tarfile.open(filename) except tarfile.TarError: raise ReadError( "%s is not a compressed or uncompressed tar file" % filename) try: tarobj.extractall(extract_dir) finally: tarobj.close() _UNPACK_FORMATS = { 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file"), } if _ZLIB_SUPPORTED: _UNPACK_FORMATS['gztar'] = (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file") if _BZ2_SUPPORTED: _UNPACK_FORMATS['bztar'] = (['.tar.bz2', '.tbz2'], _unpack_tarfile, [], "bzip2'ed tar-file") if _LZMA_SUPPORTED: _UNPACK_FORMATS['xztar'] = (['.tar.xz', '.txz'], _unpack_tarfile, [], "xz'ed tar-file") def _find_unpack_format(filename): for name, info in _UNPACK_FORMATS.items(): for extension in info[0]: if filename.endswith(extension): return name return None def unpack_archive(filename, extract_dir=None, format=None): """Unpack an archive. `filename` is the name of the archive. `extract_dir` is the name of the target directory, where the archive is unpacked. If not provided, the current working directory is used. `format` is the archive format: one of "zip", "tar", "gztar", "bztar", or "xztar". Or any other registered format. If not provided, unpack_archive will use the filename extension and see if an unpacker was registered for that extension. In case none is found, a ValueError is raised. """ sys.audit("shutil.unpack_archive", filename, extract_dir, format) if extract_dir is None: extract_dir = os.getcwd() extract_dir = os.fspath(extract_dir) filename = os.fspath(filename) if format is not None: try: format_info = _UNPACK_FORMATS[format] except KeyError: raise ValueError("Unknown unpack format '{0}'".format(format)) from None func = format_info[1] func(filename, extract_dir, **dict(format_info[2])) else: # we need to look at the registered unpackers supported extensions format = _find_unpack_format(filename) if format is None: raise ReadError("Unknown archive format '{0}'".format(filename)) func = _UNPACK_FORMATS[format][1] kwargs = dict(_UNPACK_FORMATS[format][2]) func(filename, extract_dir, **kwargs) if hasattr(os, 'statvfs'): __all__.append('disk_usage') _ntuple_diskusage = collections.namedtuple('usage', 'total used free') _ntuple_diskusage.total.__doc__ = 'Total space in bytes' _ntuple_diskusage.used.__doc__ = 'Used space in bytes' _ntuple_diskusage.free.__doc__ = 'Free space in bytes' def disk_usage(path): """Return disk usage statistics about the given path. Returned value is a named tuple with attributes 'total', 'used' and 'free', which are the amount of total, used and free space, in bytes. """ st = os.statvfs(path) free = st.f_bavail * st.f_frsize total = st.f_blocks * st.f_frsize used = (st.f_blocks - st.f_bfree) * st.f_frsize return _ntuple_diskusage(total, used, free) elif _WINDOWS: __all__.append('disk_usage') _ntuple_diskusage = collections.namedtuple('usage', 'total used free') def disk_usage(path): """Return disk usage statistics about the given path. 
Returned values is a named tuple with attributes 'total', 'used' and 'free', which are the amount of total, used and free space, in bytes. """ total, free = nt._getdiskusage(path) used = total - free return _ntuple_diskusage(total, used, free) def chown(path, user=None, group=None): """Change owner user and group of the given path. user and group can be the uid/gid or the user/group names, and in that case, they are converted to their respective uid/gid. """ sys.audit('shutil.chown', path, user, group) if user is None and group is None: raise ValueError("user and/or group must be set") _user = user _group = group # -1 means don't change it if user is None: _user = -1 # user can either be an int (the uid) or a string (the system username) elif isinstance(user, str): _user = _get_uid(user) if _user is None: raise LookupError("no such user: {!r}".format(user)) if group is None: _group = -1 elif not isinstance(group, int): _group = _get_gid(group) if _group is None: raise LookupError("no such group: {!r}".format(group)) os.chown(path, _user, _group) def get_terminal_size(fallback=(80, 24)): """Get the size of the terminal window. For each of the two dimensions, the environment variable, COLUMNS and LINES respectively, is checked. If the variable is defined and the value is a positive integer, it is used. When COLUMNS or LINES is not defined, which is the common case, the terminal connected to sys.__stdout__ is queried by invoking os.get_terminal_size. If the terminal size cannot be successfully queried, either because the system doesn't support querying, or because we are not connected to a terminal, the value given in fallback parameter is used. Fallback defaults to (80, 24) which is the default size used by many terminal emulators. The value returned is a named tuple of type os.terminal_size. """ # columns, lines are the working values try: columns = int(os.environ['COLUMNS']) except (KeyError, ValueError): columns = 0 try: lines = int(os.environ['LINES']) except (KeyError, ValueError): lines = 0 # only query if necessary if columns <= 0 or lines <= 0: try: size = os.get_terminal_size(sys.__stdout__.fileno()) except (AttributeError, ValueError, OSError): # stdout is None, closed, detached, or not a terminal, or # os.get_terminal_size() is unsupported size = os.terminal_size(fallback) if columns <= 0: columns = size.columns if lines <= 0: lines = size.lines return os.terminal_size((columns, lines)) # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) def which(cmd, mode=os.F_OK | os.X_OK, path=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can
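# --- Illustrative usage sketch (added; not part of the shutil source). ---
# A round trip through make_archive()/unpack_archive() as defined above.
def _demo_archive_roundtrip():
    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        src = os.path.join(tmp, "src")
        os.makedirs(src)
        with open(os.path.join(src, "hello.txt"), "w") as f:
            f.write("hello")
        # "zip" is always registered; gztar/bztar/xztar depend on the
        # compression modules detected at import time.
        archive = make_archive(os.path.join(tmp, "backup"), "zip", root_dir=src)
        dest = os.path.join(tmp, "out")
        unpack_archive(archive, dest)  # format inferred from the .zip suffix
        assert os.path.isfile(os.path.join(dest, "hello.txt"))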
import random import json import math import warnings def inSet(strings): inputs = {} for i in strings: inputs[i] = InNode() return inputs def outSet(strings, activation_func="sigmoid"): outputs = {} for i in strings: outputs[i] = OutNode(activation_func=activation_func) return outputs def scale(minv, v, maxv, minx=-1, maxx=1): # scale v from [minv, maxv] to [minx, maxx] return ((v - minv) / (maxv - minv)) * (maxx - minx) + minx def descale(minv, v, maxv): # map v from [0, 1] back to [minv, maxv] return v * (maxv - minv) + minv class Node: # the core of the neural net, a node must have a value def __init__(self): self.val = 0 # default the node value to 0 self.total = 0 self.count = 0 self.acivation_func = "sigmoid" def reset(self): self.total = 0 self.count = 0 def recieveValue(self, val, weight): # used to take in a value self.total += val * weight self.count += abs(weight) def calc(self): if self.acivation_func == "sigmoid": self.val = ((1 / (1 + math.pow(math.e, (-1 * self.total)))) * 2) - 1 elif self.acivation_func == "relu": self.val = max(0, self.total) elif self.acivation_func == "old": self.val = self.total / self.count else: warnings.warn("Unknown activation func: " + str(self.acivation_func)) class InNode(Node): # an input def __init__(self): super().__init__() self.connections = {} # default to no connections def connect(self, cnode): self.connections[cnode] = 0 # default each connection strength to 0 def disconnect(self, dnode): try: self.connections.pop(dnode) except Exception as e: warnings.warn(str(dnode) + " not found. Exception: " + str(e)) def activate(self): for nextNode in self.connections: nextNode.recieveValue(self.val, self.connections[nextNode]) # push the value through the connection def evolve(self, evoRate): for i in self.connections: self.connections[i] = customRand(self.connections[i], evoRate) class OutNode(Node): def __init__(self, activation_func="sigmoid"): super().__init__() # these nodes just hold values, so they are kind of dumb self.acivation_func = activation_func class MidNode(Node): # pretty much the same as an input node def __init__(self, activation_func="sigmoid"): super().__init__() self.connections = {} self.acivation_func = activation_func def connect(self, cnode): self.connections[cnode] = 0 def disconnect(self, dnode): try: self.connections.pop(dnode) except Exception as e: warnings.warn(str(dnode) + " not found.
Exception: " + str(e)) def activate(self): for nextNode in self.connections: nextNode.recieveValue(self.val, self.connections[nextNode]) # push the value through the connection def evolve(self, evoRate): for i in self.connections: self.connections[i] = customRand(self.connections[i], evoRate) class Net: # the network itself, contains many nodes def __init__(self, inputsRaw, outputsRaw, width, depth, bias=True, activation_func="sigmoid", final_activation_func="sigmoid", neat=False, datafile = None, classifier_output=None): self.activation_func = activation_func self.final_activation_func = final_activation_func self.datafile = datafile self.inputs = inSet(inputsRaw) self.outputs = outSet(outputsRaw, activation_func=final_activation_func) self.expectedInputs = inputsRaw self.expectedOutputs = outputsRaw self.bias = InNode() self.usebias = bias self.neat = neat if classifier_output is None and len(self.outputs) == 1: self.classifier_output = list(self.outputs.keys())[0] else: self.classifier_output = classifier_output self.midnodes = [] self.out = {} for i in range(0, width): midnodestemp = [] for j in range(0, depth): midnodestemp.append(MidNode(activation_func=self.activation_func)) self.midnodes.append(midnodestemp) self.width = width self.depth = depth if width == 0: for inName in self.inputs: inputNode = self.inputs[inName] for outName in self.outputs: outputNode = self.outputs[outName] inputNode.connect(outputNode) else: for inName in self.inputs: inputNode = self.inputs[inName] for midNode in self.midnodes[0]: inputNode.connect(midNode) for i in range(1, len(self.midnodes)): for midnode in self.midnodes[i - 1]: for midnode2 in self.midnodes[i]: midnode.connect(midnode2) for midnode in self.midnodes[len(self.midnodes) - 1]: for outputName in self.outputs: outputNode = self.outputs[outputName] midnode.connect(outputNode) for midnodelist in self.midnodes: for midnode in midnodelist: self.bias.connect(midnode) def setNode(self, name, val): # in self.inputs[name].val = val def getNode(self, name): # out return self.outputs[name].val def receiveInput(self, inputs): for name in inputs: self.setNode(name, scale(self.expectedInputs[name]["min"], inputs[name], self.expectedInputs[name]["max"], minx=-1, maxx=1)) def getOutput(self): out = {} for name in self.expectedOutputs: out[name] = self.getNode(name) return out def scale(self, name, val): return scale(self.expectedOutputs[name]["min"], val, self.expectedOutputs[name]["max"]) def process(self): # eval if self.usebias: self.bias.activate() for inName in self.inputs: # activate net inputNode = self.inputs[inName] inputNode.activate() for midnodelist in self.midnodes: for midnode in midnodelist: midnode.calc() midnode.activate() for outName in self.outputs: outputNode = self.outputs[outName] outputNode.calc() def reset(self): self.bias.val = -1 for name in self.inputs: # reset all inputs inputNode = self.inputs[name] inputNode.reset() for name in self.outputs: # reset all outputs outputNode = self.outputs[name] outputNode.reset() for midnodelist in self.midnodes: for midnode in midnodelist: midnode.reset() def evolve(self, evoRate): # region NEAT if self.neat: # Nuero Evolution of Augmented Topolgies if random.randint(0, 3) == 0: if random.randint(0, 2) != 0: # mostly add nodes newnode = MidNode() length = len(self.midnodes) if random.randint(0,1): #don't add stuff to the end that much length = max(0,length-1) insertlayer = random.randint(0, length) # pick one of the lists, or the end # 67% chance to do an insert instead if insertlayer != 
len(self.midnodes) and random.randint(0, 2) == 0: self.midnodes.insert(insertlayer, [newnode]) elif insertlayer == len(self.midnodes): self.midnodes.append([newnode]) else: self.midnodes[insertlayer].append(newnode) # ok, time to connect the node if insertlayer == len(self.midnodes) - 1: for inName in self.inputs: inputNode = self.inputs[inName] newnode.connect(inputNode) # def to 0, we evolve later anyway else: for midnode in self.midnodes[insertlayer + 1]: newnode.connect(midnode) else: # delete midnodes rawmidnodeslist = [] for midnodelist in self.midnodes: for midnode in midnodelist: rawmidnodeslist.append(midnode) if len(rawmidnodeslist) > 0: newnode = rawmidnodeslist.pop(random.randint(0, len(rawmidnodeslist) - 1)) for inName in self.inputs: inputNode = self.inputs[inName] if newnode in inputNode.connections.keys(): inputNode.disconnect(newnode) for midnode in rawmidnodeslist: if newnode in midnode.connections.keys(): midnode.disconnect(newnode) # endregion NEAT for inName in self.inputs: inputNode = self.inputs[inName] inputNode.evolve(evoRate) for midnodelist in self.midnodes: for midnode in midnodelist: midnode.evolve(evoRate) if self.usebias: self.bias.evolve(evoRate) return self def getJSON(self, name, ver): outjson = {"net_name": name, "net_ver": str(ver), "midwidth": self.width, "nodes": [], "expectedInputs": self.expectedInputs, "expectedOutputs": self.expectedOutputs, "act_func": self.activation_func, "fin_act_func": self.final_activation_func, "use-bias": self.usebias, "use-neat": self.neat, "data-file": self.datafile, "classifier-output": self.classifier_output} nodeid = 0 nodeindex = {} for name in self.inputs: nodeindex[self.inputs[name]] = {"layer": "input", "name": name, "id": nodeid} nodeid += 1 allmids = [] rowcount = 0 for row in self.midnodes: for midnode in row: allmids.append(midnode) nodeindex[midnode] = {"layer": rowcount, "id": nodeid} nodeid += 1 rowcount += 1 for name in self.outputs: nodeindex[self.outputs[name]] = {"layer": "output", "name": name, "id": nodeid} nodeid += 1 nodeindex[self.bias] = {"layer": "bias", "id": nodeid} for node in nodeindex: if nodeindex[node]["layer"] != "output": connectionjson = {} for connection in node.connections: connectionid = nodeindex[connection]["id"] connectionval = node.connections[connection] connectionjson[connectionid] = connectionval nodeindex[node]["connections"] = connectionjson outjson["nodes"].append(nodeindex[node]) return outjson def save(self, fname, name, ver, log=True): if log: print("Saving net: " + name + " with version: " + str(ver) + " to file: " + fname) data = self.getJSON(name, str(ver)) with open((fname + '.json'), 'w') as fp: json.dump(data, fp, sort_keys=True, indent=4) def customRand(cVal, evoRate): rate = 1 for i in range(0, 3): if random.randint(0, 2) == 1: rate = rate * 1.1 else: break rate = rate * evoRate newVal = cVal + ((random.random() * 2 - 1) * rate / 10) if newVal > 1: newVal = 1 elif newVal < -1: newVal = -1 return newVal def Random(inputs, outputs, length, width, depth, bias=True, activation_func="relu", final_activation_func="relu", neat=False, log=True): if log: print("Creating a set of " + str(length) + " random nets") netDB = [] for i in range(0, length): newNet = Net(inputs, outputs, width, depth, bias=bias, activation_func=activation_func, final_activation_func=final_activation_func, neat=neat) newNet.evolve(5) netDB.append([newNet, 0]) return netDB def loadNet(fname, log=True): try: with open((fname + '.json'), 'r') as fp: data = json.load(fp) if log: print("Found file with 
name: " + data["net_name"] + " and ver: " + data["net_ver"]) return loadNetJSON(data) except Exception as e: warnings.warn("Error loading file: " + str(e)) return Net({}, {}, 0, 0) def loadNetJSON(data): nodeindex = {} newinputs = {} newoutputs = {} newmids = [] for i in range(0, int(data["midwidth"])): newmids.append([]) newnet = Net({}, {}, 0, 0) newnet.activation_func = data["act_func"] newnet.final_activation_func = data["fin_act_func"] newnet.usebias = data["use-bias"] newnet.neat = data["use-neat"] newnet.datafile = data["data-file"] newnet.classifier_output = data["classifier-output"] for nodedata in data["nodes"]: if nodedata["layer"] == "input": newnode = InNode() nodeindex[nodedata["id"]] = newnode newinputs[nodedata["name"]] = newnode elif nodedata["layer"] == "output": newnode = OutNode(activation_func=newnet.final_activation_func) nodeindex[nodedata["id"]] = newnode newoutputs[nodedata["name"]] = newnode elif nodedata["layer"] == "bias": newnode = InNode() nodeindex[nodedata["id"]] = newnode newnet.bias = newnode else: newnode = MidNode(activation_func=newnet.activation_func) nodeindex[nodedata["id"]] = newnode newmids[int(nodedata["layer"])].append(newnode) newnet.inputs = newinputs newnet.outputs = newoutputs newnet.midnodes = newmids newnet.expectedInputs = data["expectedInputs"] newnet.expectedOutputs = data["expectedOutputs"] for nodedata in data["nodes"]: if nodedata["layer"] != "output": node = nodeindex[nodedata["id"]] for connectionnum in nodedata["connections"]: node.connections[nodeindex[int(connectionnum)]] = nodedata["connections"][connectionnum] return newnet def saveNets(netDB, fname, name, ver, log=True):
dims." % x.ndim) n = len(x) if demean: xo = x - x.mean() else: xo = x if unbiased: xi = np.arange(1, n+1) d = np.hstack((xi, xi[:-1][::-1])) else: d = n if fft: nobs = len(xo) Frf = np.fft.fft(xo, n=nobs*2) acov = np.fft.ifft(Frf*np.conjugate(Frf))[:nobs]/d return acov.real else: return (np.correlate(xo, xo, 'full')/d)[n-1:] def q_stat(x,nobs, type="ljungbox"): """ Return's Ljung-Box Q Statistic x : array-like Array of autocorrelation coefficients. Can be obtained from acf. nobs : int Number of observations in the entire sample (ie., not just the length of the autocorrelation function results. Returns ------- q-stat : array Ljung-Box Q-statistic for autocorrelation parameters p-value : array P-value of the Q statistic Notes ------ Written to be used with acf. """ x = np.asarray(x) if type=="ljungbox": ret = nobs*(nobs+2)*np.cumsum((1./(nobs-np.arange(1, len(x)+1)))*x**2) chi2 = stats.chi2.sf(ret,np.arange(1,len(x)+1)) return ret,chi2 #NOTE: Changed unbiased to False #see for example # http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm def acf(x, unbiased=False, nlags=40, confint=None, qstat=False, fft=False, alpha=None): ''' Autocorrelation function for 1d arrays. Parameters ---------- x : array Time series data unbiased : bool If True, then denominators for autocovariance are n-k, otherwise n nlags: int, optional Number of lags to return autocorrelation for. confint : scalar, optional The use of confint is deprecated. See `alpha`. If a number is given, the confidence intervals for the given level are returned. For instance if confint=95, 95 % confidence intervals are returned where the standard deviation is computed according to Bartlett\'s formula. qstat : bool, optional If True, returns the Ljung-Box q statistic for each autocorrelation coefficient. See q_stat for more information. fft : bool, optional If True, computes the ACF via FFT. alpha : scalar, optional If a number is given, the confidence intervals for the given level are returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to Bartlett\'s formula. Returns ------- acf : array autocorrelation function confint : array, optional Confidence intervals for the ACF. Returned if confint is not None. qstat : array, optional The Ljung-Box Q-Statistic. Returned if q_stat is True. pvalues : array, optional The p-values associated with the Q-statistics. Returned if q_stat is True. Notes ----- The acf at lag 0 (ie., 1) is returned. This is based np.correlate which does full convolution. For very long time series it is recommended to use fft convolution instead. If unbiased is true, the denominator for the autocovariance is adjusted but the autocorrelation is not an unbiased estimtor. ''' nobs = len(x) d = nobs # changes if unbiased if not fft: avf = acovf(x, unbiased=unbiased, demean=True) #acf = np.take(avf/avf[0], range(1,nlags+1)) acf = avf[:nlags+1]/avf[0] else: #JP: move to acovf x0 = x - x.mean() Frf = np.fft.fft(x0, n=nobs*2) # zero-pad for separability if unbiased: d = nobs - np.arange(nobs) acf = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs]/d acf /= acf[0] #acf = np.take(np.real(acf), range(1,nlags+1)) acf = np.real(acf[:nlags+1]) #keep lag 0 if not (confint or qstat or alpha): return acf if not confint is None: import warnings warnings.warn("confint is deprecated. 
Please use the alpha keyword", FutureWarning) varacf = np.ones(nlags+1)/nobs varacf[0] = 0 varacf[1] = 1./nobs varacf[2:] *= 1 + 2*np.cumsum(acf[1:-1]**2) interval = stats.norm.ppf(1-(100-confint)/200.)*np.sqrt(varacf) confint = np.array(list(zip(acf-interval, acf+interval))) if not qstat: return acf, confint if alpha is not None: varacf = np.ones(nlags+1)/nobs varacf[0] = 0 varacf[1] = 1./nobs varacf[2:] *= 1 + 2*np.cumsum(acf[1:-1]**2) interval = stats.norm.ppf(1-alpha/2.)*np.sqrt(varacf) confint = np.array(list(zip(acf-interval, acf+interval))) if not qstat: return acf, confint if qstat: qstat, pvalue = q_stat(acf[1:], nobs=nobs) #drop lag 0 if (confint is not None or alpha is not None): return acf, confint, qstat, pvalue else: return acf, qstat, pvalue def pacf_yw(x, nlags=40, method='unbiased'): '''Partial autocorrelation estimated with non-recursive yule_walker Parameters ---------- x : 1d array observations of time series for which pacf is calculated nlags : int largest lag for which pacf is returned method : 'unbiased' (default) or 'mle' method for the autocovariance calculations in yule walker Returns ------- pacf : 1d array partial autocorrelations, nlags+1 elements Notes ----- This solves yule_walker for each desired lag and contains currently duplicate calculations. ''' xm = x - x.mean() pacf = [1.] for k in range(1, nlags+1): pacf.append(yule_walker(x, k, method=method)[0][-1]) return np.array(pacf) #NOTE: this is incorrect. def pacf_ols(x, nlags=40): '''Calculate partial autocorrelations Parameters ---------- x : 1d array observations of time series for which pacf is calculated nlags : int Number of lags for which pacf is returned. Returns ------- pacf : 1d array partial autocorrelations, nlags+1 elements, including lag zero Notes ----- This solves a separate OLS estimation for each desired lag. ''' #TODO: add warnings for Yule-Walker #NOTE: demeaning and not using a constant gave incorrect answers? #JP: demeaning should have a better estimate of the constant #maybe we can compare small sample properties with a MonteCarlo xlags, x0 = lagmat(x, nlags, original='sep') #xlags = sm.add_constant(lagmat(x, nlags), prepend=True) xlags = add_constant(xlags) pacf = [1.] for k in range(1, nlags+1): res = OLS(x0[k:], xlags[k:,:k+1]).fit() #np.take(xlags[k:], range(1,k+1)+[-1], pacf.append(res.params[-1]) return np.array(pacf) def pacf(x, nlags=40, method='ywunbiased', alpha=None): '''Partial autocorrelation estimate Parameters ---------- x : 1d array observations of time series for which pacf is calculated nlags : int largest lag for which pacf is returned method : 'ywunbiased' (default) or 'ywmle' or 'ols' specifies which method for the calculations to use: - yw or ywunbiased : yule walker with bias correction in denominator for acovf - ywm or ywmle : yule walker without bias correction - ols : regression of time series on its lags and on a constant - ld or ldunbiased : Levinson-Durbin recursion with bias correction - ldb or ldbiased : Levinson-Durbin recursion without bias correction alpha : scalar, optional If a number is given, the confidence intervals for the given level are returned. For instance if alpha=.05, 95 % confidence intervals are returned where the standard deviation is computed according to 1/sqrt(len(x)) Returns ------- pacf : 1d array partial autocorrelations, nlags+1 elements, including lag zero confint : array, optional Confidence intervals for the PACF. Returned if alpha is not None.
Notes ----- This solves yule_walker equations or ols for each desired lag and contains currently duplicate calculations. ''' if method == 'ols': ret = pacf_ols(x, nlags=nlags) elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']: ret = pacf_yw(x, nlags=nlags, method='unbiased') elif method in ['ywm', 'ywmle', 'yw_mle']: ret = pacf_yw(x, nlags=nlags, method='mle') elif method in ['ld', 'ldu', 'ldunbiased', 'ld_unbiased']: acv = acovf(x, unbiased=True) ld_ = levinson_durbin(acv, nlags=nlags, isacov=True) #print 'ld', ld_ ret = ld_[2] elif method in ['ldb', 'ldbiased', 'ld_biased']: #inconsistent naming with ywmle acv = acovf(x, unbiased=False) ld_ = levinson_durbin(acv, nlags=nlags, isacov=True) ret = ld_[2] else: raise ValueError('method not available') if alpha is not None: varacf = 1./len(x) interval = stats.norm.ppf(1. - alpha/2.) * np.sqrt(varacf) confint = np.array(list(zip(ret-interval, ret+interval))) return ret, confint else: return ret def ccovf(x, y, unbiased=True, demean=True): ''' crosscovariance for 1D Parameters ---------- x, y : arrays time series data unbiased : boolean if True, then denominator is n-k, otherwise n Returns ------- ccovf : array cross-covariance function Notes ----- This uses np.correlate which does full convolution. For very long time series it is recommended to use fft convolution instead. ''' n = len(x) if demean: xo = x - x.mean(); yo = y - y.mean(); else: xo = x yo = y if unbiased: xi = np.ones(n); d = np.correlate(xi, xi, 'full') else: d = n return (np.correlate(xo,yo,'full') / d)[n-1:] def ccf(x, y, unbiased=True): '''cross-correlation function for 1d Parameters ---------- x, y : arrays time series data unbiased : boolean if True, then denominator for autocovariance is n-k, otherwise n Returns ------- ccf : array cross-correlation function of x and y Notes ----- This is based on np.correlate which does full convolution. For very long time series it is recommended to use fft convolution instead. If unbiased is true, the denominator for the autocovariance is adjusted but the autocorrelation is not an unbiased estimator. ''' cvf = ccovf(x, y, unbiased=unbiased, demean=True) return cvf / (np.std(x) * np.std(y))
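# --- Usage sketch (not part of the original module) ---
# The helpers above compose as follows; this assumes numpy/scipy are imported
# as in this module and that yule_walker is available for pacf_yw. The numbers
# are illustrative only.
rng = np.random.default_rng(0)
n = 500
eps = rng.standard_normal(n)
y = np.zeros(n)
for t in range(1, n):
    y[t] = 0.6 * y[t - 1] + eps[t]          # AR(1) with phi = 0.6
r, ci = acf(y, nlags=10, alpha=0.05)        # ACF plus Bartlett 95% bands
qs, pvals = q_stat(r[1:], nobs=n)           # Ljung-Box test, lag 0 dropped
phi = pacf_yw(y, nlags=10)                  # PACF should cut off after lag 1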
are reachable self.assertIn("/agroup3/anarray1", self.h5file) self.assertIn("/agroup3/anarray2", self.h5file) self.assertIn("/agroup3/agroup3", self.h5file) def test01b(self): """Checking rename_node (over Groups with children 2)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test01b..." % self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Create a new array self.h5file.rename_node('/agroup', 'agroup3') self.h5file.rename_node('/agroup3', 'agroup4') # Now undo the past operation self.h5file.undo() # Check that it does not exist in the object tree self.assertIn("/agroup", self.h5file) self.assertNotIn("/agroup4", self.h5file) # Check that children are reachable self.assertIn("/agroup/anarray1", self.h5file) self.assertIn("/agroup/anarray2", self.h5file) self.assertIn("/agroup/agroup3", self.h5file) self.assertEqual(self.h5file.root.agroup._v_title, "Group title") # Redo the operation self.h5file.redo() # Check that otherarray has come back to life in a sane state self.assertNotIn("/agroup", self.h5file) self.assertIn("/agroup4", self.h5file) self.assertEqual(self.h5file.root.agroup4._v_title, "Group title") # Check that children are reachable self.assertIn("/agroup4/anarray1", self.h5file) self.assertIn("/agroup4/anarray2", self.h5file) self.assertIn("/agroup4/agroup3", self.h5file) def test02(self): """Checking rename_node (over Leaves)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test02..." % self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Create a new array self.h5file.rename_node('/anarray', 'anarray2') # Now undo the past operation self.h5file.undo() # Check that otherarray does not exist in the object tree self.assertIn("/anarray", self.h5file) self.assertNotIn("/anarray2", self.h5file) self.assertEqual(self.h5file.root.anarray.title, "Array title") # Redo the operation self.h5file.redo() # Check that otherarray has come back to life in a sane state self.assertNotIn("/anarray", self.h5file) self.assertIn("/anarray2", self.h5file) self.assertEqual(self.h5file.root.anarray2.title, "Array title") def test03(self): """Checking rename_node (over Tables)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test03..." 
% self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Create a new array self.h5file.rename_node('/table', 'table2') # Now undo the past operation self.h5file.undo() # Check that table2 does not exist in the object tree self.assertIn("/table", self.h5file) table = self.h5file.root.table self.assertIsNotNone(table.cols.var1.index) self.assertIsNotNone(table.cols.var2.index) self.assertIsNotNone(table.cols.var3.index) self.assertIsNone(table.cols.var4.index) self.assertEqual(table.cols.var1.index.nelements, minRowIndex) self.assertEqual(table.cols.var2.index.nelements, minRowIndex) self.assertEqual(table.cols.var3.index.nelements, minRowIndex) self.assertNotIn("/table2", self.h5file) self.assertEqual(self.h5file.root.table.title, "Indexed") # Redo the operation self.h5file.redo() # Check that table2 has come back to life in a sane state self.assertNotIn("/table", self.h5file) self.assertIn("/table2", self.h5file) self.assertEqual(self.h5file.root.table2.title, "Indexed") table = self.h5file.root.table2 self.assertIsNotNone(table.cols.var1.index) self.assertIsNotNone(table.cols.var2.index) self.assertIsNotNone(table.cols.var3.index) self.assertEqual(table.cols.var1.index.nelements, minRowIndex) self.assertEqual(table.cols.var2.index.nelements, minRowIndex) self.assertEqual(table.cols.var3.index.nelements, minRowIndex) self.assertIsNone(table.cols.var4.index) class MoveNodeTestCase(common.TempFileMixin, TestCase): """Tests for move_node operations""" def setUp(self): super(MoveNodeTestCase, self).setUp() h5file = self.h5file root = h5file.root # Create an array h5file.create_array(root, 'array', [1, 2], title="Title example") # Create another array object h5file.create_array(root, 'anarray', [1], "Array title") # Create a group object group = h5file.create_group(root, 'agroup', "Group title") # Create a couple of objects there h5file.create_array(group, 'anarray1', [2], "Array title 1") h5file.create_array(group, 'anarray2', [2], "Array title 2") # Create a lonely group in first level h5file.create_group(root, 'agroup2', "Group title 2") # Create a new group in the second level h5file.create_group(group, 'agroup3', "Group title 3") # Create a table in root populateTable(self.h5file.root, 'table') def test00(self): """Checking move_node (over Leaf)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test00..." % self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Create a new array self.h5file.move_node('/anarray', '/agroup/agroup3') # Now undo the past operation self.h5file.undo() # Check that it does not exist in the object tree self.assertIn("/anarray", self.h5file) self.assertNotIn("/agroup/agroup3/anarray", self.h5file) self.assertEqual(self.h5file.root.anarray.title, "Array title") # Redo the operation self.h5file.redo() # Check that otherarray has come back to life in a sane state self.assertNotIn("/anarray", self.h5file) self.assertIn("/agroup/agroup3/anarray", self.h5file) self.assertEqual(self.h5file.root.agroup.agroup3.anarray.title, "Array title") def test01(self): """Checking move_node (over Groups with children)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test01..." 
% self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Create a new array self.h5file.move_node('/agroup', '/agroup2', 'agroup3') # Now undo the past operation self.h5file.undo() # Check that it does not exist in the object tree self.assertIn("/agroup", self.h5file) self.assertNotIn("/agroup2/agroup3", self.h5file) # Check that children are reachable self.assertIn("/agroup/anarray1", self.h5file) self.assertIn("/agroup/anarray2", self.h5file) self.assertIn("/agroup/agroup3", self.h5file) self.assertEqual(self.h5file.root.agroup._v_title, "Group title") # Redo the operation self.h5file.redo() # Check that otherarray has come back to life in a sane state self.assertNotIn("/agroup", self.h5file) self.assertIn("/agroup2/agroup3", self.h5file) self.assertEqual(self.h5file.root.agroup2.agroup3._v_title, "Group title") # Check that children are reachable self.assertIn("/agroup2/agroup3/anarray1", self.h5file) self.assertIn("/agroup2/agroup3/anarray2", self.h5file) self.assertIn("/agroup2/agroup3/agroup3", self.h5file) def test01b(self): """Checking move_node (over Groups with children 2)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test01b..." % self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Create a new array self.h5file.move_node('/agroup', '/', 'agroup3') self.h5file.move_node('/agroup3', '/agroup2', 'agroup4') # Now undo the past operation self.h5file.undo() # Check that it does not exist in the object tree self.assertIn("/agroup", self.h5file) self.assertNotIn("/agroup2/agroup4", self.h5file) # Check that children are reachable self.assertIn("/agroup/anarray1", self.h5file) self.assertIn("/agroup/anarray2", self.h5file) self.assertIn("/agroup/agroup3", self.h5file) self.assertEqual(self.h5file.root.agroup._v_title, "Group title") # Redo the operation self.h5file.redo() # Check that otherarray has come back to life in a sane state self.assertNotIn("/agroup", self.h5file) self.assertIn("/agroup2/agroup4", self.h5file) self.assertEqual(self.h5file.root.agroup2.agroup4._v_title, "Group title") # Check that children are reachable self.assertIn("/agroup2/agroup4/anarray1", self.h5file) self.assertIn("/agroup2/agroup4/anarray2", self.h5file) self.assertIn("/agroup2/agroup4/agroup3", self.h5file) def test02(self): """Checking move_node (over Leaves)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test02..." % self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Create a new array self.h5file.move_node('/anarray', '/agroup2', 'anarray2') # Now undo the past operation self.h5file.undo() # Check that otherarray does not exist in the object tree self.assertIn("/anarray", self.h5file) self.assertNotIn("/agroup2/anarray2", self.h5file) self.assertEqual(self.h5file.root.anarray.title, "Array title") # Redo the operation self.h5file.redo() # Check that otherarray has come back to life in a sane state self.assertNotIn("/anarray", self.h5file) self.assertIn("/agroup2/anarray2", self.h5file) self.assertEqual( self.h5file.root.agroup2.anarray2.title, "Array title") def test03(self): """Checking move_node (over Tables)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test03..." 
% self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Create a new array self.h5file.move_node('/table', '/agroup2', 'table2') # Now undo the past operation self.h5file.undo() # Check that table2 does not exist in the object tree self.assertIn("/table", self.h5file) self.assertNotIn("/agroup2/table2", self.h5file) table = self.h5file.root.table self.assertIsNotNone(table.cols.var1.index) self.assertIsNotNone(table.cols.var2.index) self.assertIsNotNone(table.cols.var3.index) self.assertIsNone(table.cols.var4.index) self.assertEqual(table.cols.var1.index.nelements, minRowIndex) self.assertEqual(table.cols.var2.index.nelements, minRowIndex) self.assertEqual(table.cols.var3.index.nelements, minRowIndex) self.assertEqual(self.h5file.root.table.title, "Indexed") # Redo the operation self.h5file.redo() # Check that table2 has come back to life in a sane state self.assertNotIn("/table", self.h5file) self.assertIn("/agroup2/table2", self.h5file) self.assertEqual(self.h5file.root.agroup2.table2.title, "Indexed") table = self.h5file.root.agroup2.table2 self.assertIsNotNone(table.cols.var1.index) self.assertIsNotNone(table.cols.var2.index) self.assertIsNotNone(table.cols.var3.index) self.assertEqual(table.cols.var1.index.nelements, minRowIndex) self.assertEqual(table.cols.var2.index.nelements, minRowIndex) self.assertEqual(table.cols.var3.index.nelements, minRowIndex) self.assertIsNone(table.cols.var4.index) class RemoveNodeTestCase(common.TempFileMixin, TestCase): """Test for remove_node operations""" def setUp(self): super(RemoveNodeTestCase, self).setUp() h5file = self.h5file root = h5file.root # Create an array h5file.create_array(root, 'array', [1, 2], title="Title example") # Create another array object h5file.create_array(root, 'anarray', [1], "Array title") # Create a group object group = h5file.create_group(root, 'agroup', "Group title") # Create a couple of objects there h5file.create_array(group, 'anarray1', [2], "Array title 1") h5file.create_array(group, 'anarray2', [2], "Array title 2") # Create a lonely group in first level h5file.create_group(root, 'agroup2', "Group title 2") # Create a new group in the second level h5file.create_group(group, 'agroup3', "Group title 3") # Create a table in root populateTable(self.h5file.root, 'table') def test00(self): """Checking remove_node (over Leaf)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test00..." % self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Delete an existing array self.h5file.remove_node('/anarray') # Now undo the past operation self.h5file.undo() # Check that it does exist in the object tree self.assertIn("/anarray", self.h5file) self.assertEqual(self.h5file.root.anarray.title, "Array title") # Redo the operation self.h5file.redo() # Check that array has gone again self.assertNotIn("/anarray", self.h5file) def test00b(self): """Checking remove_node (over several Leaves)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test00b..." 
% self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Delete a couple of arrays self.h5file.remove_node('/anarray') self.h5file.remove_node('/agroup/anarray2') # Now undo the past operation self.h5file.undo() # Check that arrays has come into life self.assertIn("/anarray", self.h5file) self.assertIn("/agroup/anarray2", self.h5file) self.assertEqual(self.h5file.root.anarray.title, "Array title") self.assertEqual( self.h5file.root.agroup.anarray2.title, "Array title 2") # Redo the operation self.h5file.redo() # Check that arrays has disappeared again self.assertNotIn("/anarray", self.h5file) self.assertNotIn("/agroup/anarray2", self.h5file) def test00c(self): """Checking remove_node (over Tables)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test00c..." % self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Create a new array self.h5file.remove_node('/table') # Now undo the past operation self.h5file.undo() # Check that table2 does not exist in the object tree self.assertIn("/table", self.h5file) table = self.h5file.root.table self.assertIsNotNone(table.cols.var1.index) self.assertIsNotNone(table.cols.var2.index) self.assertIsNotNone(table.cols.var3.index) self.assertIsNone(table.cols.var4.index) self.assertEqual(table.cols.var1.index.nelements, minRowIndex) self.assertEqual(table.cols.var2.index.nelements, minRowIndex) self.assertEqual(table.cols.var3.index.nelements, minRowIndex) self.assertEqual(self.h5file.root.table.title, "Indexed") # Redo the operation self.h5file.redo() # Check that table2 has come back to life in a sane state self.assertNotIn("/table", self.h5file) def test01(self): """Checking remove_node (over Groups with children)""" if common.verbose: print('\n', '-=' * 30) print("Running %s.test01..." % self.__class__.__name__) # open the do/undo self.h5file.enable_undo() # Delete a group recursively self.h5file.remove_node('/agroup', recursive=1) # Now undo the past operation self.h5file.undo() # Check that parent and children has come into life in a sane state self.assertIn("/agroup", self.h5file) self.assertIn("/agroup/anarray1", self.h5file) self.assertIn("/agroup/anarray2", self.h5file) self.assertIn("/agroup/agroup3", self.h5file) self.assertEqual(self.h5file.root.agroup._v_title, "Group title") # Redo the operation self.h5file.redo() # Check that parent and children are not reachable self.assertNotIn("/agroup", self.h5file) self.assertNotIn("/agroup/anarray1", self.h5file) self.assertNotIn("/agroup/anarray2", self.h5file) self.assertNotIn("/agroup/agroup3", self.h5file) def test01b(self): """Checking remove_node (over Groups
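# --- Usage sketch (not part of the test suite; the listing is truncated above) ---
# A standalone distillation of the do/undo pattern these tests exercise,
# using only the public PyTables API:
import tables

with tables.open_file("undo_demo.h5", mode="w") as h5file:
    h5file.create_array(h5file.root, "anarray", [1], "Array title")
    h5file.enable_undo()                     # start recording do/undo marks
    h5file.rename_node("/anarray", "anarray2")
    assert "/anarray2" in h5file
    h5file.undo()                            # roll the rename back
    assert "/anarray" in h5file and "/anarray2" not in h5file
    h5file.redo()                            # replay it
    assert "/anarray2" in h5file
    h5file.disable_undo()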
<filename>func_moead.py # -*- coding: utf-8 -*- """ Author: <NAME> <<EMAIL>> website: http://www.cs.cityu.edu.hk/~xilin4/ github: This code is a demo for this paper: A Decomposition based Multiobjective Evolutionary Algorithm with Classification <NAME>, <NAME>, <NAME> Proceedings of the 2016 IEEE Congress on Evolutionary Computation (CEC16) Vancouver, Canada, July 2016 """ import os import sys import copy import numpy as np import scipy from sklearn import svm from test_instances import * path = os.path.abspath(os.path.dirname(sys.argv[0])) class Problem: """Multi-objective Problem name: MOP name dim: dimension of decision space nobj: dimension of objective space domain: domain for decision variable """ def __init__(self,name,dim,nobj,domain = None): self.name = name self.dim = dim self.nobj = nobj if domain is None: self.domain = np.tile(np.array([0,1]),(self.dim,1)) def evaluate(self,x): #evaluate objective values for a given decision variable return testfunc(self,x) class Params: """Parameters for MOEA/D popsize: population size niche: neighbourhood size dmethod: decomposition method iteration: maximum number of iterations in each run; not used in this demo stop_nfeval: maximum number of function evaluations in each run updateprob: probability that parent solutions are selected from the neighbourhood rather than the whole population updatenb: maximum number of current solutions that may be replaced by each new solution F, CR: parameters for DE operator """ def __init__(self,popsize,niche,dmethod,iteration, stop_nfeval,updateprob,updatenb,F,CR): self.popsize = popsize self.niche = niche self.dmethod = dmethod self.iteration = iteration self.stop_nfeval = stop_nfeval self.updateprob = updateprob self.updatenb = updatenb self.F = F self.CR = CR class Subproblem: """Subproblem in MOEA/D weight: decomposition weight neighbour: index of neighbours curpoint: Individual Class, current best solution subpoint: Individual Class, current sub-best solution, for classification training """ def __init__(self,weight,mop,params): self.weight = weight self.neighbour = np.full(params.niche,np.nan) self.curpoint = np.full(mop.dim,np.nan) self.subpoint = np.full(mop.dim,np.nan) class Individual: """Solution in MOEA/D parameter: decision variable value: objective value """ def __init__(self,parameter): self.parameter = parameter self.value = float('Inf') def init_params(ins): """Initialize parameters for a test instance The parameters given in this function are the same as in the paper: A Decomposition based Multiobjective Evolutionary Algorithm with Classification <NAME>, <NAME>, <NAME> IEEE World Congress on Computational Intelligence (IEEE WCCI), Vancouver, Canada, July 2016 Parameters ---------- ins: name of the test instance Returns ------- dim: dimension of decision space nobj: dimension of objective space popsize: population size niche: neighbourhood size stop_nfeval: maximum number of function evaluations in each run """ if ins in ['ZDT1','ZDT2','ZDT3']: dim = 30 nobj = 2 popsize = 300 niche = 30 stop_nfeval = 100000 if ins in ['ZDT4','ZDT6']: dim = 10 nobj = 2 popsize = 300 niche = 30 stop_nfeval = 100000 if ins in ['DTLZ1']: dim = 10 nobj = 3 popsize = 595 niche = 50 stop_nfeval = 100000 if ins in ['DTLZ2']: dim = 30 nobj = 3 popsize = 595 niche = 50 stop_nfeval = 100000 if ins in ['UF1','UF2','UF3','UF4','UF5','UF6','UF7']: dim = 30 nobj = 2 popsize = 600 niche = 30 stop_nfeval = 300000 if ins in ['UF8','UF9','UF10']: dim = 30 nobj = 3 popsize = 595 niche = 50 stop_nfeval = 300000 return dim, nobj, popsize,
niche, stop_nfeval def init_point(mop): """Initialize a solution with a randomly generated decision variable Parameters ---------- mop: Problem Class multi-objective problem to be solved Returns ------- point: Individual Class a solution with a randomly generated decision variable, which is not evaluated yet """ lowend = mop.domain[:,0] span = mop.domain[:,1] - lowend para = lowend + span * np.random.rand(mop.dim) point = Individual(para) return point def init_subproblem_classification(mop,params): """Initialize all subproblems and the ideal point for MOEA/D-SVM Parameters ---------- mop: Problem Class multi-objective problem to be solved params: Params Class parameters for MOEA/D Returns ------- subproblems: Subproblem Class all subproblems initialized according to mop and params idealpoint: estimated ideal point for Tchebycheff decomposition """ #load already generated weight vectors from the weight file weights = np.loadtxt(path + "/weight/W%dD_%d.dat"%(mop.nobj,params.popsize)) idealpoint = np.ones(mop.nobj) * float('Inf') subproblems = [] #initialize Subproblem Class for each weight vector for i in range(params.popsize): sub = Subproblem(weights[i],mop,params) subproblems.append(sub) #distmat[i,j] is the distance between sub[i] and sub[j], distmat[i,i] = nan distmat = np.full([params.popsize, params.popsize],np.nan) #initialize current best/sub-best point for each subproblem and the ideal point for i in range(params.popsize): for j in range(i+1,params.popsize): a = subproblems[i].weight b = subproblems[j].weight distmat[i,j] = np.linalg.norm(a - b) distmat[j,i] = distmat[i,j] #calculate the neighbourhood for each subproblem subproblems[i].neighbour = distmat[i,].argsort()[0:params.niche] subproblems[i].curpoint = init_point(mop) subproblems[i].curpoint.value = mop.evaluate( subproblems[i].curpoint.parameter) subproblems[i].subpoint = init_point(mop) subproblems[i].subpoint.value = mop.evaluate( subproblems[i].subpoint.parameter) idealpoint = np.minimum.reduce([idealpoint, subproblems[i].curpoint.value, subproblems[i].subpoint.value]) #swap(curpoint,subpoint) if g_i(subpoint) < g_i(curpoint) #where g_i() is the value function for the i-th subproblem for i in range(params.popsize): curvalue = subobjective_vec(subproblems[i].weight, subproblems[i].curpoint.value.reshape(1,-1), idealpoint,params.dmethod) subvalue = subobjective_vec(subproblems[i].weight, subproblems[i].subpoint.value.reshape(1,-1), idealpoint,params.dmethod) if subvalue < curvalue: subproblems[i].curpoint, subproblems[i].subpoint = subproblems[i].subpoint, subproblems[i].curpoint return (subproblems, idealpoint) def init_subproblem(mop,params): """Initialize all subproblems and the ideal point for MOEA/D Parameters ---------- mop: Problem Class multi-objective problem to be solved params: Params Class parameters for MOEA/D Returns ------- subproblems: Subproblem Class all subproblems initialized according to mop and params idealpoint: estimated ideal point for Tchebycheff decomposition """ weights = np.loadtxt(path + "/weight/W%dD_%d.dat"%(mop.nobj,params.popsize)) idealpoint = np.ones(mop.nobj) * float('Inf') subproblems = [] #initialize Subproblem Class for each weight vector for i in range(params.popsize): sub = Subproblem(weights[i],mop,params) subproblems.append(sub) #distmat[i,j] is the distance between sub[i] and sub[j], distmat[i,i] = nan distmat = np.full([params.popsize, params.popsize],np.nan) #initialize current best point for each subproblem and the ideal point for i in range(params.popsize): for j in range(i+1,params.popsize): a =
subproblems[i].weight b = subproblems[j].weight distmat[i,j] = np.linalg.norm(a - b) distmat[j,i] = distmat[i,j] subproblems[i].neighbour = distmat[i,].argsort()[0:params.niche] subproblems[i].curpoint = init_point(mop) subproblems[i].curpoint.value = mop.evaluate( subproblems[i].curpoint.parameter) idealpoint = np.minimum(idealpoint,subproblems[i].curpoint.value) return (subproblems, idealpoint) def terminate(n,params): """Decide whether to terminate the current algorithm run Parameters ---------- n: total number of evaluations conducted in the current run params: Params Class parameters for MOEA/D Returns ------- boolean expression True if the total number of evaluations reaches params.stop_nfeval """ return n >= params.stop_nfeval def genetic_op(index,updateneighbour,mop, params,subproblems,ptype): """Generate a new solution for the index-th subproblem Parameters ---------- index: subproblem index updateneighbour: boolean expression whether parent solutions are selected from the neighbourhood or not mop: Problem Class multi-objective problem to be solved params: Params Class parameters for MOEA/D subproblems: Subproblem Class all subproblems ptype: the type of generated solutions, always "current" in this demo Returns ------- newpoint: Individual Class a newly generated solution """ #select parents parents_index = mate_select(index,updateneighbour, subproblems,params,2) #generate a new solution using DE crossover newpoint = de_crossover(index,parents_index,subproblems, params.F,params.CR,mop,ptype) #mutate new solution mutate(newpoint,mop,1.0/mop.dim,20) return newpoint def mate_select(index,updateneighbour, subproblems,params,size): """Select parents for new solution generation Parameters ---------- index: subproblem index updateneighbour: boolean expression whether parents are selected from the neighbourhood or not subproblems: Subproblem Class all subproblems params: Params Class parameters for MOEA/D size: number of parents selected Returns ------- selected_list: List, len(List) = size list of selected parents' indexes """ selected_list = [] #decide whether parents are selected from the neighbourhood or not if(updateneighbour): selindex = subproblems[index].neighbour else: selindex = range(params.popsize) #build the list of selected parents' indexes while len(selected_list) < size: r = np.random.rand(1)[0] parent = selindex[int(np.floor(len(selindex)*r))] if parent not in selected_list: selected_list.append(parent) return selected_list def de_crossover(index,parents_index,subproblems,F,CR,mop,ptype): """Generate a new solution using DE crossover Parameters ---------- index: subproblem index parents_index: List list of selected parents' indexes subproblems: Subproblem Class all subproblems F,CR: DE parameters mop: Problem Class multi-objective problem to be solved ptype: the type of generated solutions, always "current" in this demo Returns ------- newpoint: Individual Class a newly generated solution """ #initialize new solution with a randomly generated decision variable newpoint = init_point(mop) #decide the decision variable using DE crossover if ptype == 'current': x1 = subproblems[index].curpoint.parameter x2 = subproblems[parents_index[0]].curpoint.parameter
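# --- Orientation sketch (de_crossover is truncated above at this point) ---
# subobjective_vec() is defined elsewhere in this demo; a plausible reading of
# the Tchebycheff/weighted-sum decomposition it computes, plus the standard
# DE/rand/1/bin step that de_crossover appears to implement, is sketched below.
# Both helpers are assumptions for illustration, not the author's exact code.
def subobjective_vec_sketch(weight, values, idealpoint, dmethod='te'):
    """values: (n, nobj) array of objective vectors; returns (n,) scalar costs."""
    if dmethod == 'te':
        # Tchebycheff: g(x | w, z*) = max_i w_i * |f_i(x) - z*_i|
        return np.max(weight * np.abs(values - idealpoint), axis=1)
    elif dmethod == 'ws':
        # Weighted sum: g(x | w) = sum_i w_i * f_i(x)
        return values @ weight
    raise ValueError('unknown decomposition method: %s' % dmethod)

def de_rand_1_bin_sketch(x1, x2, x3, F, CR, domain):
    v = x1 + F * (x2 - x3)                             # differential mutation
    mask = np.random.rand(len(x1)) < CR                # binomial crossover
    mask[np.random.randint(len(x1))] = True            # guarantee one mutated gene
    child = np.where(mask, v, x1)
    return np.clip(child, domain[:, 0], domain[:, 1])  # repair to box bounds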
#!/usr/bin/env python # encoding: utf-8 """ @author: <NAME> 刘祥德 @license: (C) Copyright 2019-now, Node Supply Chain Manager Corporation Limited. @contact: <EMAIL> @software: @file: warp.py @time: 10/3/19 4:58 PM @version 1.0 @desc: """ import logging import os import random import cv2 import numpy as np import torch import torchvision from PIL import Image from skimage.transform import PiecewiseAffineTransform, warp from config.config import setup_logging, DEBUG from constant import * from utils.misc import label_list, AngleFactory, image2label from utils.transforms import ToUnNormalizedTensor logger_name = 'warp_logger' level = logging.INFO logger = setup_logging('.', logger_name, level) # CARI_IMG_PATH = '../datasets/Caricature-img' # FACE_IMG_PATH = '../datasets/CelebA-HQ-img' # CARI_DATASET_PATH = '../datasets/Caricature-mask' # FACE_DATASET_PATH = '../datasets/CelebAMaskHQ-mask' # CARI_DATASET_COLOR_PATH = '../datasets/Caricature-mask-color' # FACE_DATASET_COLOR_PATH = '../datasets/CelebAMaskHQ-mask-color' # FACE_WARPED = '../datasets/CelebA-HQ-img-Warped' face_img_name = '1.png' cari_img_name = '1' face_mask_path = os.path.join(FACE_MASK_PATH, face_img_name) face_path = os.path.join(FACE_IMG_PATH, '1.jpg') cari_mask_path = os.path.join(CARI_MASK_PATH, cari_img_name + '.png') cari_path = os.path.join(CARI_IMG_PATH, cari_img_name + '.jpg') face_mask = cv2.imread(face_mask_path, cv2.IMREAD_GRAYSCALE) cari_mask = cv2.imread(cari_mask_path, cv2.IMREAD_GRAYSCALE) # 'skin', 'nose', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'mouth', 'u_lip','l_lip' # sample_num_list = [50, 50, 50, 50, 50, 50, 50, 50, 50, 50] sample_num_list = [80, 50, 50, 25, 25, 25, 25, 30, 20, 20] # sample_num_list = [50, 50, 50, 25, 25, 25, 25, 30, 20, 20] # sample_num_list = [50, 50, 20, 20, 20, 20, 20, 20, 20, 20] face = cv2.imread(face_path) cari = cv2.imread(cari_path) transforms = [torchvision.transforms.Resize(512), ToUnNormalizedTensor()] transforms = torchvision.transforms.Compose(transforms) # face_torch = transforms(Image.open(face_path)) def warp_image(image, src_points=None, dst_points=None, transform=None): if transform is None: if src_points is not None and dst_points is not None: transform = get_transform(image, src_points, dst_points) else: raise Exception('Src points and dst points must not be None.') warped = warp(image, transform, output_shape=image.shape) return warped, transform def warp_nearest(image, src_points=None, dst_points=None, transform=None): if transform is None: if src_points is not None and dst_points is not None: transform = get_transform(image, src_points, dst_points) else: raise Exception('Src points and dst points must not be None.') warped = warp(image, transform, output_shape=image.shape, order=0) return warped, transform def get_transform(image, src_points, dst_points): src_points = np.array( [ [0, 0], [0, image.shape[0]], [image.shape[0], 0], list(image.shape[:2]) ] + src_points.tolist() ) dst_points = np.array( [ [0, 0], [0, image.shape[0]], [image.shape[0], 0], list(image.shape[:2]) ] + dst_points.tolist() ) tform3 = PiecewiseAffineTransform() tform3.estimate(dst_points, src_points) return tform3 def sample_arrange(src, num, label): """ Sample key points by equal spaing :param src: :param num: :return: """ arrange = len(src) # if num > len(src): # logger.info("Num out of length, return arrange: [{}]".format(src)) # return src # else: # output = np.array((1, 2), dtype=arrange.dtype) output = [] seg = arrange // num if seg == 0: msg = '[{}]: The number of sampling points exceeds the 
number of source points, and the original array is ' \ 'equidistantly filled.'.format(label) logger.info(msg) return insert_equal_space(src, arrange, num) seg = arrange / num for n in range(num): if int(seg * n) >= len(src): output.append((src[-1] + src[-2]) // 2) else: output.append(src[int(seg * n)]) return output def insert_equal_space(src, arrange, num): output = src.copy() need = num - arrange sample_space = need // arrange mod = need % arrange position = 1 for idx in range(arrange): # is_enough = False pre_el = src[idx] next_el = src[(idx + 1) % arrange] output = fill(pre_el, next_el, position, sample_space, output) position += (sample_space + 1) if len(output) == num: return output.reshape(-1, 2) else: for idx in range(mod): output = np.append(output, src[-1]) return output.reshape(-1, 2) def fill(pre_el, next_el, position, sample_space, output): for j in range(sample_space): sample = (pre_el + next_el) // (sample_space + 1) * (j + 1) output = np.insert(output, position + j, sample.reshape(2), axis=0) return output def is_filtered(points): return len(points) == 1 and (points == np.array([[-1, -1]])).all() def find_key_points(img, sample_num_list): import cv2 excluded_index = [1, 7] labels_tensor = np.arange(0, len(label_list)).reshape(len(label_list), 1, 1) # labels_tensor = torch.arange(0, len(label_list)).view(len(label_list), 1, 1) split_tensors = (img == labels_tensor).astype(np.uint8) point_list_sorted_by_polar = [] # np.arang kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)) for index, tensor in enumerate(split_tensors): if index in excluded_index: # key_points[index] = np.array([[-1, -1]]) point_list_sorted_by_polar.append(np.array([[-1, -1]])) logger.info('Semantic label: [{}] is excluded.'.format(index)) continue color = colormap[tensor].astype(np.uint8) label = label_list[index] # gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY) # cv2.imshow('gray', gray) # ret, binary = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY) tensor = tensor * 255 # connects some semantic attribute for generating only one contours tensor = cv2.morphologyEx(tensor, cv2.MORPH_CLOSE, kernel) ret, binary = cv2.threshold(tensor, 10, 255, cv2.THRESH_BINARY) # Skin reverser color ensure finding only on contour if index == 0: binary = cv2.bitwise_not(binary) # if DEBUG: # cv2.imshow('binary', binary) # cv2.waitKey(0) tensor, contours, hierarchy = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE) logger.info("Semantic label [{}] find contours: [{}]".format(label, len(contours))) if not len(contours): logger.error('Cannot find contours for semantic label [{}], return None for filtering this img.'.format( label)) return None # point_list_sorted_by_polar.append(np.array([[-1, -1]])) if len(contours) > 1: contours = [max(contours, key=cv2.contourArea)] unit_anchor = np.array([0, 1]) for points in contours: mom = cv2.moments(points) # print(points.shape) centroid = np.array([int(mom['m10'] / mom['m00']), int(mom['m01'] / mom['m00'])]) cv2.circle(color, (centroid[0], centroid[1]), 5, (0, 0, 255), -1) points = points.reshape(-1, 2) points = [[p, AngleFactory.calAngleClockwise(unit_anchor + centroid, p, centroid)] for p in points] points_sorted_by_polar = [el[0] for el in sorted(points, key=lambda el: el[1])] logger.info( "Semantic label [{}] gains [{}] contour points.".format(label, len(points_sorted_by_polar))) point_list_sorted_by_polar.append(points_sorted_by_polar) if DEBUG: dynamic_display_ordered_contour_points(index, color, points_sorted_by_polar) key_point_list = [] for index, 
key_points in enumerate(point_list_sorted_by_polar): label = label_list[index] if is_filtered(key_points): logger.info('Semantic tensor [{}] does not contain any contour points or was filtered by configuration'.format( label)) key_point_list.append(np.array([[-1, -1]])) continue sampled_key_point = sample_arrange(key_points, sample_num_list[index], label) if len(sampled_key_point) != sample_num_list[index]: msg = 'The number of sampling points [{}] must be the same as the number [{}] specified by the configuration in [{}].'.format( len(sampled_key_point), sample_num_list[index], label) logger.error(msg) return None logger.debug('Semantic label [{}] sampled: [{}].'.format(label, sampled_key_point)) key_point_list.append(sampled_key_point) return key_point_list # centriods.append((center_x, center_y)) # cv2.circle(color, (center_x, center_y), 4, (152, 255, 255), -1) # cv2.imshow('moment', color) # cv2.waitKey(0) # print(img.shape) # print(split_tensors.shape) def dynamic_display_ordered_contour_points(label_index, color, points_sorted_by_polar): tmp_path = 'polar' if not os.path.exists(tmp_path): os.mkdir(tmp_path) path = os.path.join(tmp_path, str(label_index)) if not os.path.exists(path): os.mkdir(path) # hstack = [] for index, p in enumerate(points_sorted_by_polar): if index % 20 == 0: cv2.circle(color, (p[0], p[1]), 4, (152, 255, 255), -1) cv2.imwrite(os.path.join(path, str(index)) + '.png', color) # hstack.append(color.copy()) # vstack = [] # j = 0 # for index in len(hstack): # if (index + 1) % 4: # vstack.append(np.vstack(hstack[j * 4:index])) # cv2.imwrite(os.path.join(path, str(index)) + '.png', color) # cv2.waitKey(0) def display_pair_key_points(face_src, cari_src, f_kl, c_kl): face_img = face_src.copy() cari_img = cari_src.copy() for index in range(len(f_kl)): fpts = f_kl[index] cpts = c_kl[index] r = random.randint(0, 255) g = random.randint(0, 255) b = random.randint(0, 255) if is_filtered(fpts) or is_filtered(cpts): continue for idx in range(len(fpts)): cv2.circle(face_img, center=(fpts[idx][0], fpts[idx][1]), radius=2, color=(b, g, r), thickness=-1) cv2.circle(cari_img, center=(cpts[idx][0], cpts[idx][1]), radius=2, color=(b, g, r), thickness=-1) # cv2.imshow('Key points', img) # cv2.waitKey(0) return face_img, cari_img def draw_kpts(src, kpts): face_img = src.copy() for p in kpts: r = random.randint(0, 255) g = random.randint(0, 255) b = random.randint(0, 255) if is_filtered(p): continue cv2.circle(face_img, center=(p[0], p[1]), radius=2, color=(b, g, r), thickness=-1) #each p is a single (x, y) point # cv2.imshow('Key points', img) # cv2.waitKey(0) return face_img def draw_kpts_pil(src, kpts): img = cv2.cvtColor(np.array(src), cv2.COLOR_RGB2BGR) kpts = kpts.int().numpy().reshape(-1, 2) # img = cv2.resize(img, (0, 0), fx=scale, fy=scale) return Image.fromarray(cv2.cvtColor(draw_kpts(img, kpts), cv2.COLOR_BGR2RGB)) def warp_paired(face_img_name, cari_img_name, face_mask_path, cari_mask_path, face_path, cari_path, sample_num_list): # test_loader() # test_celeb_mask_loading() # face_mask = cv2.imread(face_mask_path, cv2.IMREAD_GRAYSCALE) # cari_mask = cv2.imread(cari_mask_path, cv2.IMREAD_GRAYSCALE) face_color = colormap[face_mask].astype(np.uint8) cari_color = colormap[cari_mask].astype(np.uint8) face = cv2.imread(face_path) cari = cv2.imread(cari_path) face = cv2.resize(face, (0, 0), fx=0.5, fy=0.5) if face_mask is None: logger.info('Loading Img Error, [{}] not found.'.format(face_mask_path)) # sample_num_list = [30, 30, 30, 30, 30, 30, 30, 30, 30, 30] # sample_num_list = [100, 100, 100, 100, 100, 100,
100, 100, 100, 100] ckpts, fkpts, k_cari, k_face = get_paired_key_points(face_img_name, cari_img_name, face_mask, cari_mask, sample_num_list, face, cari) warped, warped_mask, warped_mask_color, transform = warped_face_mask(ckpts, face, face_color, fkpts) # x_position_map, y_position_map = build_position_map(face.shape[1], face.shape[0]) # x_position_map = make_x_position_map(1,face_mask.shape[1]).reshape() # warped_xpm, _ = warp_image(x_position_map, transform=transform) # warped_ypm, _ = warp_image(y_position_map, transform=transform) # print(x_position_map) # delta_x = (warped_xpm * 255).astype(np.uint8) - x_position_map # delta_y = (warped_ypm * 255).astype(np.uint8) - y_position_map if DEBUG: stack = np.hstack((k_face, k_cari, warped)) stack_mask = np.hstack((face_color, cari_color, warped_mask_color)) stack_mask = cv2.cvtColor(stack_mask, cv2.COLOR_RGB2BGR) stack_all = np.vstack((stack, stack_mask)) if not os.path.exists(FACE_WARPED): os.mkdir(FACE_WARPED) cv2.imwrite(os.path.join(FACE_WARPED, str(len(FACE_WARPED) + 1) + '.png'), stack_all) return warped_mask def estimate_offset_field(face_mask, cari_mask, face_img_name, cari_img_name, sample_num_list): width, height = face_mask.shape[1], face_mask.shape[0] if face_mask is None: logger.info('Loading Img Error, [{}] not found.'.format(face_mask_path)) ckpts, fkpts = get_paired_key_points(face_img_name, cari_img_name, face_mask, cari_mask, sample_num_list) return estimate_offset_field_by_kpts(fkpts, ckpts, height, width) #
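# --- Usage sketch (estimate_offset_field_by_kpts is defined elsewhere) ---
# The warping pattern used above, made self-contained: pin the image corners,
# estimate a piecewise-affine mapping, and warp. skimage's warp() expects the
# inverse map, which is why get_transform() calls tform.estimate(dst, src).
import numpy as np
from skimage.transform import PiecewiseAffineTransform, warp

img = np.zeros((256, 256, 3))
src = np.array([[64, 64], [192, 64], [128, 192]], dtype=np.float64)
dst = src + np.array([[10, 0], [-10, 5], [0, -15]])   # made-up displacements
corners = np.array([[0, 0], [0, 255], [255, 0], [255, 255]], dtype=np.float64)
tform = PiecewiseAffineTransform()
tform.estimate(np.vstack([corners, dst]), np.vstack([corners, src]))
warped = warp(img, tform, output_shape=img.shape)     # use order=0 for label masks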
<filename>tools/Blender Stuff/Plugins/KerraxImpExp/2.78 fix/scripts/addons/KrxImpExp/Krx3dsExp.py from KrxImpExp.impexp import * def FormatMsg(fmt, args): msg = (fmt) for i in range((0), (9)): argtempl = ("%" + (int_to_string(i + 1))) argpos = ((msg).find(argtempl)) if(argpos != -1): msg = ((msg)[0:argpos] + ((args)[i]) + (msg)[argpos + 2:len(msg)]) return (msg) def FormatMsg0(fmt): args = ([]) return (FormatMsg(fmt, args)) def FormatMsg1(fmt, arg1): args = ([]) args.append(arg1) return (FormatMsg(fmt, args)) def FormatMsg2(fmt, arg1, arg2): args = ([]) args.append(arg1) args.append(arg2) return (FormatMsg(fmt, args)) def FormatMsg3(fmt, arg1, arg2, arg3): args = ([]) args.append(arg1) args.append(arg2) args.append(arg3) return (FormatMsg(fmt, args)) def FormatMsg4(fmt, arg1, arg2, arg3, arg4): args = ([]) args.append(arg1) args.append(arg2) args.append(arg3) args.append(arg4) return (FormatMsg(fmt, args)) def FormatMsg5(fmt, arg1, arg2, arg3, arg4, arg5): args = ([]) args.append(arg1) args.append(arg2) args.append(arg3) args.append(arg4) args.append(arg5) return (FormatMsg(fmt, args)) def FormatMsg6(fmt, arg1, arg2, arg3, arg4, arg5, arg6): args = ([]) args.append(arg1) args.append(arg2) args.append(arg3) args.append(arg4) args.append(arg5) args.append(arg6) return (FormatMsg(fmt, args)) class TFile: def __MoveFilePos(self, ofs): self. __pos = (self. __pos + ofs) if(self. __size < self. __pos): self. __size = (self. __pos) def Init(self): self. __stream = (None) self. __name = ("") self. __mode = ("") self. __size = (0) self. __pos = (0) def Open(self, filename, mode): if(self. __stream != None): self. Close() filesz = (get_file_size(filename)) self. __stream = (open_file(filename, mode)) if(self. __stream == None): cr = ((mode).find("w") != -1) if(cr): raise RuntimeError(FormatMsg1("Could not open file for write.\nFile path: \"%1\".", filename)) else: raise RuntimeError(FormatMsg1("Could not open file for read.\nFile path: \"%1\".", filename)) self. __name = (filename) self. __mode = (mode) self. __size = (filesz) self. __pos = (0) def IsOpened(self): return (self. __stream != None) def Close(self): if(self. __stream != None): close_file(self. __stream) self. Init() def GetName(self): return (self. __name) def GetMode(self): return (self. __mode) def GetSize(self): return (self. __size) def GetPos(self): return (self. __pos) def SetPos(self, pos): if(pos != self. __pos): if(pos < 0 or pos > self. __size): raise RuntimeError(FormatMsg4("Attempt to seek file pointer out of file.\nFile path: \"%1\".\nPosition of file pointer: %2.\nAllowable range: %3..%4.", self. __name, ("0x"+hex(pos)[2: ].upper()), ("0x"+hex(0)[2: ].upper()), ("0x"+hex(self. __size)[2: ].upper()))) file_seek(self. __stream, pos); self. __pos = (pos) def Eof(self): return (self. __pos == self. __size) def WriteSignedChar(self, i): if(i < -128 or i > 127): raise RuntimeError(FormatMsg4("Could not write an integer to file.\nAn integer is out of range.\nFile path: \"%1\".\nInteger: %2.\nAllowable range: %3..%4.", self. __name, (int_to_string(i)), "-128", "127")) b = ((write_signed_char(self. __stream, i))) if( not (b)): raise RuntimeError(FormatMsg2("Could not write data to file.\nFile path: \"%1\".\nSize of data: %2.", self. __name, "1")) self. __MoveFilePos(1) def WriteUnsignedChar(self, i): if(i < 0 or i > 255): raise RuntimeError(FormatMsg4("Could not write an integer to file.\nAn integer is out of range.\nFile path: \"%1\".\nInteger: %2.\nAllowable range: %3..%4.", self. 
__name, (int_to_string(i)), "0", "255")) b = ((write_unsigned_char(self. __stream, i))) if( not (b)): raise RuntimeError(FormatMsg2("Could not write data to file.\nFile path: \"%1\".\nSize of data: %2.", self. __name, "1")) self. __MoveFilePos(1) def WriteBool(self, b): i = (0) if(b): i = (1) self. WriteUnsignedChar(i) def WriteSignedShort(self, i): if(i < -32768 or i > 32767): raise RuntimeError(FormatMsg4("Could not write an integer to file.\nAn integer is out of range.\nFile path: \"%1\".\nInteger: %2.\nAllowable range: %3..%4.", self. __name, (int_to_string(i)), "-32768", "32767")) b = ((write_signed_short(self. __stream, i))) if( not (b)): raise RuntimeError(FormatMsg2("Could not write data to file.\nFile path: \"%1\".\nSize of data: %2.", self. __name, "2")) self. __MoveFilePos(2) def WriteUnsignedShort(self, i): if(i < 0 or i > 65535): raise RuntimeError(FormatMsg4("Could not write an integer to file.\nAn integer is out of range.\nFile path: \"%1\".\nInteger: %2.\nAllowable range: %3..%4.", self. __name, (int_to_string(i)), "0", "65535")) b = ((write_unsigned_short(self. __stream, i))) if( not (b)): raise RuntimeError(FormatMsg2("Could not write data to file.\nFile path: \"%1\".\nSize of data: %2.", self. __name, "2")) self. __MoveFilePos(2) def WriteSignedLong(self, i): b = ((write_signed_long(self. __stream, i))) if( not (b)): raise RuntimeError(FormatMsg2("Could not write data to file.\nFile path: \"%1\".\nSize of data: %2.", self. __name, "4")) self. __MoveFilePos(4) def WriteUnsignedLong(self, i): b = ((write_unsigned_long(self. __stream, i))) if( not (b)): raise RuntimeError(FormatMsg2("Could not write data to file.\nFile path: \"%1\".\nSize of data: %2.", self. __name, "4")) self. __MoveFilePos(4) def WriteFloat(self, f): b = ((write_float(self. __stream, f))) if( not (b)): raise RuntimeError(FormatMsg2("Could not write data to file.\nFile path: \"%1\".\nSize of data: %2.", self. __name, "4")) self. __MoveFilePos(4) def WriteString(self, str): b = ((write_stringz(self. __stream, str))) sz = (len(str) + 1) if( not (b)): raise RuntimeError(FormatMsg2("Could not write data to file.\nFile path: \"%1\".\nSize of data: %2.", self. __name, (int_to_string(sz)))) self. __MoveFilePos(sz) def WriteLine(self, str): b = ((write_line(self. __stream, str))) sz = (len(str) + 2) if( not (b)): raise RuntimeError(FormatMsg2("Could not write data to file.\nFile path: \"%1\".\nSize of data: %2.", self. __name, (int_to_string(sz)))) self. __MoveFilePos(sz) def ReadSignedChar(self): i = ((read_signed_char(self. __stream))) if(i == None): raise RuntimeError(FormatMsg3("Could not read data from file.\nFile path: \"%1\".\nPosition in file: %2.\nSize of data: %3.", self. __name, ("0x"+hex(self. __pos)[2: ].upper()), "1")) self. __MoveFilePos(1) return (i) def ReadUnsignedChar(self): i = ((read_unsigned_char(self. __stream))) if(i == None): raise RuntimeError(FormatMsg3("Could not read data from file.\nFile path: \"%1\".\nPosition in file: %2.\nSize of data: %3.", self. __name, ("0x"+hex(self. __pos)[2: ].upper()), "1")) self. __MoveFilePos(1) return (i) def ReadBool(self): i = (self. ReadUnsignedChar()) return (i != 0) def ReadSignedShort(self): i = ((read_signed_short(self. __stream))) if(i == None): raise RuntimeError(FormatMsg3("Could not read data from file.\nFile path: \"%1\".\nPosition in file: %2.\nSize of data: %3.", self. __name, ("0x"+hex(self. __pos)[2: ].upper()), "2")) self. __MoveFilePos(2) return (i) def ReadUnsignedShort(self): i = ((read_unsigned_short(self. 
__stream))) if(i == None): raise RuntimeError(FormatMsg3("Could not read data from file.\nFile path: \"%1\".\nPosition in file: %2.\nSize of data: %3.", self. __name, ("0x"+hex(self. __pos)[2: ].upper()), "2")) self. __MoveFilePos(2) return (i) def ReadSignedLong(self): i = ((read_signed_long(self. __stream))) if(i == None): raise RuntimeError(FormatMsg3("Could not read data from file.\nFile path: \"%1\".\nPosition in file: %2.\nSize of data: %3.", self. __name, ("0x"+hex(self. __pos)[2: ].upper()), "4")) self. __MoveFilePos(4) return (i) def ReadUnsignedLong(self): i = ((read_unsigned_long(self. __stream))) if(i == None): raise RuntimeError(FormatMsg3("Could not read data from file.\nFile path: \"%1\".\nPosition in file: %2.\nSize of data: %3.", self. __name, ("0x"+hex(self. __pos)[2: ].upper()), "4")) self. __MoveFilePos(4) return (i) def ReadFloat(self): f = ((read_float(self. __stream))) if(f == None): raise RuntimeError(FormatMsg3("Could not read data from file.\nFile path: \"%1\".\nPosition in file: %2.\nSize of data: %3.", self. __name, ("0x"+hex(self. __pos)[2: ].upper()), "4")) self. __MoveFilePos(4) return (f) def ReadString(self): str = ((read_stringz(self. __stream))) if(str == None): raise RuntimeError(FormatMsg2("Could not read a null-terminated string from file.\nThe string seems to be too long.\nFile path: \"%1\".\nPosition in file: %2.", self. __name, ("0x"+hex(self. __pos)[2: ].upper()))) sz = (len(str) + 1) self. __MoveFilePos(sz) return (str) def ReadLine(self): str = ((read_line(self. __stream))) if(str == None): raise RuntimeError(FormatMsg2("Could not read a CR+LF ended line from file.\nThe line seems to be too long.\nFile path: \"%1\".\nPosition in file: %2.", self. __name, ("0x"+hex(self. __pos)[2: ].upper()))) self. __pos = (file_tell(self. __stream)) return (str) def NewFile(): fl = (TFile()) fl.Init() return (fl) class TObjectStats: def Init(self): self. __nameInFile = ("") self. __nameInScene = ("") self. __numMtls = (0) self. __numFaces = (0) self. __numVertsInFile = (0) self. __numVertsInScene = (0) def Write(self, f): f.WriteString(self. __nameInFile) f.WriteString(self. __nameInScene) f.WriteUnsignedLong(self. __numMtls) f.WriteUnsignedLong(self. __numFaces) f.WriteUnsignedLong(self. __numVertsInFile) f.WriteUnsignedLong(self. __numVertsInScene) def Read(self, f): self. __nameInFile = (f.ReadString()) self. __nameInScene = (f.ReadString()) self. __numMtls = (f.ReadUnsignedLong()) self. __numFaces = (f.ReadUnsignedLong()) self. __numVertsInFile = (f.ReadUnsignedLong()) self. __numVertsInScene = (f.ReadUnsignedLong()) def SetNameInFile(self, nameInFile): self. __nameInFile = (nameInFile) def GetNameInFile(self): return (self. __nameInFile) def SetNameInScene(self, nameInScene): self. __nameInScene = (nameInScene) def GetNameInScene(self): return (self. __nameInScene) def SetNumMtls(self, numMtls): self. __numMtls = (numMtls) def GetNumMtls(self): return (self. __numMtls) def SetNumFaces(self, numFaces): self. __numFaces = (numFaces) def GetNumFaces(self): return (self. __numFaces) def SetNumVertsInFile(self, numVertsInFile): self. __numVertsInFile = (numVertsInFile) def GetNumVertsInFile(self): return (self. __numVertsInFile) def SetNumVertsInScene(self, numVertsInScene): self. __numVertsInScene = (numVertsInScene) def GetNumVertsInScene(self): return (self. __numVertsInScene) def NewObjectStats(): objectStats = (TObjectStats()) objectStats.Init() return (objectStats) class TSpaceTransform: def Init(self): self. __setupUnit = (5) self. 
__systemUnitsPerFileUnit = (1) self. __fileUnitsPerSystemUnit = (1) def Write(self, f): f.WriteSignedLong(self. __setupUnit) f.WriteFloat(self. __systemUnitsPerFileUnit) f.WriteFloat(self. __fileUnitsPerSystemUnit) def Read(self, f): self. __setupUnit = (f.ReadSignedLong()) self. __systemUnitsPerFileUnit = (f.ReadFloat()) self. __fileUnitsPerSystemUnit = (f.ReadFloat()) def GetSetupUnit(self): return (self. __setupUnit) def SetSetupUnit(self, setupUnit): self. __setupUnit = (setupUnit) def GetSystemUnitsPerFileUnit(self): return (self. __systemUnitsPerFileUnit) def SetSystemUnitsPerFileUnit(self, scaleCoef): self. __systemUnitsPerFileUnit = (scaleCoef) self. __fileUnitsPerSystemUnit = ((int_to_float(1)) / scaleCoef) def GetFileUnitsPerSystemUnit(self): return (self. __fileUnitsPerSystemUnit) def SetFileUnitsPerSystemUnit(self, scaleCoef): self. __fileUnitsPerSystemUnit = (scaleCoef) self. __systemUnitsPerFileUnit = ((int_to_float(1)) / scaleCoef) def NewSpaceTransform(): st = (TSpaceTransform()) st.Init() return (st) class TMatRenParams: def __ExtendString(self, str, newLength): str2 = (str) while(len(str2) < newLength): str2 += (" "); return (str2) def __ParsePmlFile(self, pmlFilePath): f = (NewFile()) try: f.Open(pmlFilePath, "rt") name = ("") texture = ("") while( not (f.Eof())): str = (f.ReadLine()) if((str).find("[% zCMaterial") != -1): name = ("") texture = ("") elif((str).find("name=string:") != -1): pos = ((str).find(":")) name = ((str)[pos + 1:len(str)]) elif((str).find("texture=string:") != -1): pos = ((str).find(":")) texture = ((str)[pos + 1:len(str)]) elif((str).find("[]") != -1): if((name != "") and (texture != "")): alreadyInList = (False) for i in range((0), ((len(self. __names)))): if((((self. __names)[i]) == name) and (((self. __textures)[i]) == texture)): alreadyInList = (True) break if( not (alreadyInList)): self. __names.append(name) self. __textures.append(texture) except RuntimeError as ex: None f.Close() def __CompareMaterials(self, i, j): cmp = (stricmp(((self. __names)[i]), ((self. __names)[j]))) if(cmp == 0): cmp = (stricmp(((self. __textures)[i]), ((self. __textures)[j]))) return (cmp) def __SwapMaterials(self, i, j): tmp = (((self. __names)[i])) self. __names[i] = (((self. __names)[j])) self. __names[j] = (tmp) tmp = (((self. __textures)[i])) self. __textures[i] = (((self. __textures)[j])) self. __textures[j] = (tmp) def __SortMaterials(self): for i in range((0), ((len(self. __names)))): m = (i) for j in range((i + 1), ((len(self. __names)))): if(self. __CompareMaterials(m, j) > 0): m = (j) if(m != i): self. __SwapMaterials(m, i) def Init(self): self. __names = ([]) self. __textures = ([]) self. __autoNames = (False) def SaveTextFile(self, filename): f = (NewFile()) try: f.Open(filename, "wt") str = (self. __ExtendString("Material ", 41) + " | Texture") f.WriteLine(str) for i in range((0), ((len(self. __textures)))): str = (self. __ExtendString("\"" + ((self. __names)[i]) + "\"", 41)) str = (str + " | \"" + ((self. __textures)[i]) + "\"") f.WriteLine(str) f.WriteLine("") f.WriteLine("AutoNames = " + (bool_to_string(self. __autoNames))) except RuntimeError as ex: None f.Close() def LoadTextFile(self, filename): self. 
Init() f = (NewFile()) try: f.Open(filename, "rt") while( not (f.Eof())): str = (f.ReadLine()) strU = (uppercase(str)) if((strU).find("AUTONAMES") != -1): pos = ((str).find("=")) if(pos != -1): strBoolVal = ((str)[pos + 1:len(str)]) while((strBoolVal)[0:1] == " "): strBoolVal = ((strBoolVal)[1:len(strBoolVal)]) while((strBoolVal)[len(strBoolVal) - 1:len(strBoolVal)] == " "): strBoolVal = ((strBoolVal)[0:len(strBoolVal) - 1]) self. __autoNames = ((string_to_bool(strBoolVal))) elif((str).find("|") != -1): pos1 = ((str).find("\"")) if(pos1 != -1): pos2 = (((str)[pos1 + 1:len(str)]).find("\"") + pos1 + 1) if(pos2 != -1): pos3 = (((str)[pos2 +
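# --- Orientation sketch (the Krx3dsExp listing is truncated above) ---
# The primitives TFile relies on (open_file, write_float, read_signed_long,
# read_stringz, ...) live in KrxImpExp.impexp, which is not shown here. A
# plausible pure-Python equivalent for a few of them, using little-endian
# struct packing as 3DS chunks require, might look like this; it is an
# assumption, not the plugin's actual implementation.
import struct

def write_float(stream, f):
    try:
        stream.write(struct.pack('<f', f))    # 4-byte little-endian float
        return True
    except (OSError, struct.error):
        return False

def read_signed_long(stream):
    data = stream.read(4)
    if len(data) != 4:
        return None                           # TFile maps None to a read error
    return struct.unpack('<i', data)[0]       # 4-byte little-endian signed int

def read_stringz(stream):
    out = bytearray()
    while True:
        b = stream.read(1)
        if not b:
            return None                       # EOF before the terminator
        if b == b'\x00':
            return out.decode('latin-1')      # null-terminated string
        out += b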
<filename>improver_tests/metadata/test_forecast_times.py # -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # (C) British Crown Copyright 2017-2020 Met Office. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Unit tests for forecast time coordinate utilities""" import unittest from datetime import datetime, timedelta import iris import numpy as np from iris.exceptions import CoordinateNotFoundError from iris.tests import IrisTest from improver.metadata.constants.time_types import TimeSpec from improver.metadata.forecast_times import ( _calculate_forecast_period, _find_latest_cycletime, forecast_period_coord, rebadge_forecasts_as_latest_cycle, unify_cycletime, ) from improver.synthetic_data.set_up_test_cubes import ( add_coordinate, set_up_variable_cube, ) from improver.utilities.warnings_handler import ManageWarnings class Test_forecast_period_coord(IrisTest): """Test the forecast_period_coord function""" def setUp(self): """Set up a test cube with a forecast period scalar coordinate""" self.cube = set_up_variable_cube(np.ones((1, 3, 3), dtype=np.float32)) def test_basic(self): """Test that an iris.coords.DimCoord is returned from a cube with an existing forecast period""" result = forecast_period_coord(self.cube) self.assertIsInstance(result, iris.coords.DimCoord) def test_no_forecast_period(self): """Test that an iris.coords.AuxCoord is returned from a cube with no forecast period""" self.cube.remove_coord("forecast_period") result = forecast_period_coord(self.cube, force_lead_time_calculation=True) self.assertIsInstance(result, iris.coords.AuxCoord) def test_values(self): """Test that the data within the coord is as expected with the expected units, when the input cube has a forecast_period coordinate. 
""" fp_coord = self.cube.coord("forecast_period").copy() result = forecast_period_coord(self.cube) self.assertArrayEqual(result.points, fp_coord.points) self.assertEqual(result.units, fp_coord.units) self.assertEqual(result.dtype, fp_coord.dtype) def test_values_force_lead_time_calculation(self): """Test that the data within the coord is as expected with the expected units, when the input cube has a forecast_period coordinate. """ fp_coord = self.cube.coord("forecast_period").copy() # put incorrect data into the existing coordinate so we can test it is # correctly recalculated self.cube.coord("forecast_period").points = np.array([-3600], dtype=np.int32) result = forecast_period_coord(self.cube, force_lead_time_calculation=True) self.assertArrayEqual(result.points, fp_coord.points) self.assertEqual(result.units, fp_coord.units) self.assertEqual(result.dtype, fp_coord.dtype) def test_exception_insufficient_data(self): """Test that a CoordinateNotFoundError exception is raised if forecast period cannot be calculated from the available coordinates """ self.cube.remove_coord("forecast_reference_time") self.cube.remove_coord("forecast_period") msg = "The forecast period coordinate is not available" with self.assertRaisesRegex(CoordinateNotFoundError, msg): forecast_period_coord(self.cube) class Test__calculate_forecast_period(IrisTest): """Test the _calculate_forecast_period function""" def setUp(self): """Set up test inputs (4 hour forecast period)""" cube = set_up_variable_cube(np.ones((1, 3, 3), dtype=np.float32)) self.time_coord = cube.coord("time") self.frt_coord = cube.coord("forecast_reference_time") self.fp_coord = cube.coord("forecast_period") def test_basic(self): """Test correct coordinate type is returned""" result = _calculate_forecast_period(self.time_coord, self.frt_coord) self.assertIsInstance(result, iris.coords.AuxCoord) def test_dim_coord(self): """Test it is possible to create a dimension coordinate""" result = _calculate_forecast_period( self.time_coord, self.frt_coord, dim_coord=True ) self.assertIsInstance(result, iris.coords.DimCoord) def test_values(self): """Test correct values are returned""" result = _calculate_forecast_period(self.time_coord, self.frt_coord) self.assertArrayAlmostEqual(result.points, self.fp_coord.points) self.assertEqual(result.units, self.fp_coord.units) self.assertEqual(result.dtype, self.fp_coord.dtype) def test_changing_mandatory_types(self): """Test that the data within the coord is as expected with the expected units, when mandatory standards for the forecast_period coordinate are changed. 
""" local_spec = TimeSpec(calendar=None, dtype=np.float64, units="hours") result = _calculate_forecast_period( self.time_coord, self.frt_coord, coord_spec=local_spec ) self.assertEqual(result.units, "hours") self.assertArrayAlmostEqual(result.points * 3600.0, self.fp_coord.points) self.assertEqual(result.dtype, np.float64) def test_bounds(self): """Test that the forecast_period coord has bounds where appropriate""" time_point = self.time_coord.points[0] self.time_coord.bounds = [[time_point - 3600, time_point]] fp_point = self.fp_coord.points[0] expected_fp_bounds = [[fp_point - 3600, fp_point]] result = _calculate_forecast_period(self.time_coord, self.frt_coord) self.assertArrayAlmostEqual(result.points, [fp_point]) self.assertArrayAlmostEqual(result.bounds, expected_fp_bounds) def test_multiple_time_points(self): """Test a multi-valued forecast period coordinate can be created""" time_point = self.time_coord.points[0] new_time_points = [time_point, time_point + 3600, time_point + 7200] new_time_coord = self.time_coord.copy(new_time_points) fp_point = self.fp_coord.points[0] expected_fp_points = [fp_point, fp_point + 3600, fp_point + 7200] result = _calculate_forecast_period(new_time_coord, self.frt_coord) self.assertArrayAlmostEqual(result.points, expected_fp_points) def test_check_time_unit_conversion(self): """Test correct values and units are returned when the input time and forecast reference time coordinates are in different units """ self.time_coord.convert_units("seconds since 1970-01-01 00:00:00") self.frt_coord.convert_units("hours since 1970-01-01 00:00:00") result = _calculate_forecast_period(self.time_coord, self.frt_coord) self.assertEqual(result, self.fp_coord) @ManageWarnings(record=True) def test_negative_forecast_period(self, warning_list=None): """Test a warning is raised if the calculated forecast period is negative""" # default cube has a 4 hour forecast period, so add 5 hours to frt self.frt_coord.points = self.frt_coord.points + 5 * 3600 result = _calculate_forecast_period(self.time_coord, self.frt_coord) warning_msg = "The values for the time" result = _calculate_forecast_period(self.time_coord, self.frt_coord) self.assertTrue(any(item.category == UserWarning for item in warning_list)) self.assertTrue(any(warning_msg in str(item) for item in warning_list)) self.assertEqual(result.points, [-3600]) class Test_rebadge_forecasts_as_latest_cycle(IrisTest): """Test the rebadge_forecasts_as_latest_cycle function""" def setUp(self): """Set up some cubes with different cycle times""" self.cycletime = "20190711T1200Z" validity_time = datetime(2019, 7, 11, 14) self.cube_early = set_up_variable_cube( np.full((4, 4), 273.15, dtype=np.float32), time=validity_time, frt=datetime(2019, 7, 11, 9), ) self.cube_late = set_up_variable_cube( np.full((4, 4), 273.15, dtype=np.float32), time=validity_time, frt=datetime(2019, 7, 11, 10), ) def test_cubelist(self): """Test a list of cubes is returned with the latest frt""" expected = self.cube_late.copy() result = rebadge_forecasts_as_latest_cycle([self.cube_early, self.cube_late]) self.assertIsInstance(result, iris.cube.CubeList) self.assertEqual(len(result), 2) for cube in result: for coord in ["forecast_reference_time", "forecast_period"]: self.assertEqual(cube.coord(coord), expected.coord(coord)) def test_cycletime(self): """Test a list of cubes using the cycletime argument""" expected_frt_point = ( self.cube_late.coord("forecast_reference_time").points[0] + 2 * 3600 ) expected_fp_point = self.cube_late.coord("forecast_period").points[0] 
- 2 * 3600 result = rebadge_forecasts_as_latest_cycle( [self.cube_early, self.cube_late], cycletime=self.cycletime ) for cube in result: self.assertEqual( cube.coord("forecast_reference_time").points[0], expected_frt_point ) self.assertEqual(cube.coord("forecast_period").points[0], expected_fp_point) def test_single_cube(self): """Test a single cube is returned unchanged if the cycletime argument is not set""" expected = self.cube_early.copy() (result,) = rebadge_forecasts_as_latest_cycle([self.cube_early]) for coord in ["forecast_reference_time", "forecast_period"]: self.assertEqual(result.coord(coord), expected.coord(coord)) def test_single_cube_with_cycletime(self): """Test a single cube has its forecast reference time and period updated if cycletime is specified""" expected_frt_point = ( self.cube_late.coord("forecast_reference_time").points[0] + 2 * 3600 ) expected_fp_point = self.cube_late.coord("forecast_period").points[0] - 2 * 3600 (result,) = rebadge_forecasts_as_latest_cycle( [self.cube_late], cycletime=self.cycletime ) self.assertEqual( result.coord("forecast_reference_time").points[0], expected_frt_point ) self.assertEqual(result.coord("forecast_period").points[0], expected_fp_point) class Test_unify_cycletime(IrisTest): """Test the unify_cycletime function.""" def setUp(self): """Set up a UK deterministic cube for testing.""" self.cycletime = datetime(2017, 1, 10, 6) cube_uk_det = set_up_variable_cube( np.full((4, 4), 273.15, dtype=np.float32), time=self.cycletime, frt=datetime(2017, 1, 10, 3), ) # set up forecast periods of 6, 8 and 10 hours time_points = [1484038800, 1484046000, 1484053200] cube_uk_det = add_coordinate( cube_uk_det, time_points, "time", dtype=np.int64, coord_units="seconds since 1970-01-01 00:00:00", ) self.cube_uk_det = add_coordinate(cube_uk_det, [1000], "model_id") self.cube_uk_det.add_aux_coord( iris.coords.AuxCoord(["uk_det"], long_name="model_configuration") ) def test_cubelist_input(self): """Test when supplying a cubelist as input containing cubes representing UK deterministic and UK ensemble model configuration and unifying the forecast_reference_time, so that both model configurations have a common forecast_reference_time.""" cube_uk_ens = set_up_variable_cube( np.full((3, 4, 4), 273.15, dtype=np.float32), time=self.cycletime, frt=datetime(2017, 1, 10, 4), ) # set up forecast periods of 5, 7 and 9 hours time_points = [1484031600, 1484038800, 1484046000] cube_uk_ens = add_coordinate( cube_uk_ens, time_points, "time", dtype=np.int64, coord_units="seconds since 1970-01-01 00:00:00", ) expected_uk_det = self.cube_uk_det.copy() frt_units = expected_uk_det.coord("forecast_reference_time").units frt_points = [np.round(frt_units.date2num(self.cycletime)).astype(np.int64)] expected_uk_det.coord("forecast_reference_time").points = frt_points expected_uk_det.coord("forecast_period").points = np.array([3, 5, 7]) * 3600 expected_uk_ens = cube_uk_ens.copy() expected_uk_ens.coord("forecast_reference_time").points = frt_points expected_uk_ens.coord("forecast_period").points = np.array([1, 3, 5]) * 3600 expected = iris.cube.CubeList([expected_uk_det, expected_uk_ens]) cubes = iris.cube.CubeList([self.cube_uk_det, cube_uk_ens]) result = unify_cycletime(cubes, self.cycletime) self.assertIsInstance(result, iris.cube.CubeList) self.assertEqual(result, expected) def test_single_item_cubelist_input(self): """Test when supplying a cube representing a UK deterministic model configuration only. 
This effectively updates the forecast_reference_time on the cube to the specified cycletime.""" expected_uk_det = self.cube_uk_det.copy() frt_units = expected_uk_det.coord("forecast_reference_time").units frt_points = [np.round(frt_units.date2num(self.cycletime)).astype(np.int64)] expected_uk_det.coord("forecast_reference_time").points = frt_points expected_uk_det.coord("forecast_period").points = np.array([3, 5, 7]) * 3600 result = unify_cycletime([self.cube_uk_det], self.cycletime) self.assertIsInstance(result, iris.cube.CubeList) self.assertEqual(result[0], expected_uk_det) def test_input_no_forecast_period_coordinate(self): """Test when supplying a cube representing a UK deterministic model configuration only. This forces a forecast_period coordinate to be created from a forecast_reference_time coordinate and a time coordinate.""" expected_uk_det = self.cube_uk_det.copy() frt_units = expected_uk_det.coord("forecast_reference_time").units frt_points = [np.round(frt_units.date2num(self.cycletime)).astype(np.int64)] expected_uk_det.coord("forecast_reference_time").points = frt_points expected_uk_det.coord("forecast_period").points = np.array([3, 5,
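# The tests above boil down to simple arithmetic: the forecast period is the
# validity time minus the forecast reference time, once both are expressed in
# the same units. A standalone sketch of that relationship (illustrative only,
# not improver's implementation, which builds iris coordinates and enforces
# mandatory dtypes and units):

def forecast_period_seconds(time_points_s, frt_point_s):
    """Forecast periods (s) for validity times and a reference time, all in s."""
    return [t - frt_point_s for t in time_points_s]

# a 12:00 cycle (43200 s) with validity times at 15:00 and 16:00 gives
# lead times of 3 h and 4 h:
assert forecast_period_seconds([54000, 57600], 43200) == [10800, 14400]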
<filename>src/GridCal/Engine/IO/cim_parser.py
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
from GridCal.Engine.basic_structures import Logger
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.Devices import *
from math import sqrt


def index_find(string, start, end):
    """
    version of substring that matches
    :param string: string
    :param start: string to start splitting
    :param end: string to end splitting
    :return: string between start and end
    """
    return string.partition(start)[2].partition(end)[0]


class GeneralContainer:

    def __init__(self, id, tpe, resources=None, class_replacements=None):
        """
        General CIM object container
        :param id: object id
        :param tpe: type of the object (class)
        :param resources: list of properties which are considered resources
        :param class_replacements: dict of class replacements per property
        """
        self.properties = dict()

        # avoid mutable default arguments: a shared dict/list would otherwise
        # be reused by every instance created without explicit values
        self.class_replacements = class_replacements if class_replacements is not None else dict()

        # store the object type
        self.tpe = tpe

        # pick the object id
        self.id = id

        # list of properties which are considered as resources
        self.resources = resources if resources is not None else list()

        self.terminals = list()

        self.base_voltage = list()

        self.containers = list()

    def parse_line(self, line):
        """
        Parse xml line that eligibly belongs to this object
        :param line: xml text line
        """
        # the parsers are lists of 2 sets of separators
        # the first separator tries to substring the property name
        # the second tries to substring the property value
        parsers = [[('.', '>'), ('>', '<')],
                   [('.', ' rdf:resource'), ('rdf:resource="', '"')]]

        for L1, L2 in parsers:
            # try to parse the property
            prop = index_find(line, L1[0], L1[1]).strip()

            # try to parse the value
            val = index_find(line, L2[0], L2[1])

            # remove the pound
            if len(val) > 0:
                if val[0] == '#':
                    val = val[1:]

            val = val.replace('\n', '')

            if prop != "":
                # if val not in ["", "\n"]:
                # if val not in [ "\n"]:
                self.properties[prop] = val

    def merge(self, other):
        """
        Merge the properties of this object with another
        :param other: GeneralContainer instance
        """
        self.properties = {**self.properties, **other.properties}

    def print(self):
        print('Type:' + self.tpe)
        print('Id:' + self.id)

        for key in self.properties.keys():
            val = self.properties[key]

            if type(val) == GeneralContainer:
                for key2 in val.properties.keys():
                    val2 = val.properties[key2]
                    print(key, '->', key2, ':', val2)
            else:
                print(key, ':', val)

    def __str__(self):
        return self.tpe + ':' + self.id

    def get_xml(self, level=0):
        """
        Returns an XML representation of the object
        Args:
            level:

        Returns:

        """

        """
        <cim:IEC61970CIMVersion rdf:ID="version">
            <cim:IEC61970CIMVersion.version>IEC61970CIM16v29a</cim:IEC61970CIMVersion.version>
            <cim:IEC61970CIMVersion.date>2015-07-15</cim:IEC61970CIMVersion.date>
        </cim:IEC61970CIMVersion>
        """
        l1 = '  ' * level  # start/end tabbing
        l2 = '  ' * (level + 1)  # middle tabbing

        # header
        xml = l1 + '<cim:' + self.tpe + ' rdf:ID="' + self.id + '">\n'

        # properties
        for prop, value in self.properties.items():
            v = str(value).replace(' ', '_')

            # eventually replace the class of the property, because CIM is so well designed...
            if prop in self.class_replacements.keys():
                cls = self.class_replacements[prop]
            else:
                cls = self.tpe

            if prop in self.resources:
                xml += l2 + '<cim:' + cls + '.' + prop + ' rdf:resource="#' + v + '" />\n'
            else:
                xml += l2 + '<cim:' + cls + '.' + prop + '>' + v + '</cim:' + cls + '.' + prop + '>\n'

        # closing
        xml += l1 + '</cim:' + self.tpe + '>\n'

        return xml


class ACLineSegment(GeneralContainer):

    def __init__(self, id, tpe):
        GeneralContainer.__init__(self, id, tpe)

        self.base_voltage = list()
        self.current_limit = list()


class PowerTransformer(GeneralContainer):

    def __init__(self, id, tpe):
        GeneralContainer.__init__(self, id, tpe)

        self.windings = list()


class Winding(GeneralContainer):

    def __init__(self, id, tpe):
        GeneralContainer.__init__(self, id, tpe)

        self.tap_changers = list()


class ConformLoad(GeneralContainer):

    def __init__(self, id, tpe):
        GeneralContainer.__init__(self, id, tpe)

        self.load_response_characteristics = list()


class SynchronousMachine(GeneralContainer):

    def __init__(self, id, tpe):
        GeneralContainer.__init__(self, id, tpe)

        self.base_voltage = list()
        self.regulating_control = list()
        self.generating_unit = list()


class CIMCircuit:

    def __init__(self):
        """
        CIM circuit constructor
        """
        self.elements = list()
        self.elm_dict = dict()
        self.elements_by_type = dict()

        # classes to read; the others are ignored
        self.classes = ["ACLineSegment", "Analog", "BaseVoltage", "Breaker",
                        "BusbarSection", "ConformLoad", "ConformLoadSchedule", "ConnectivityNode",
                        "Control", "CurrentLimit", "DayType", "Disconnector",
                        "Discrete", "EnergyConsumer", "EquivalentInjection", "EquivalentNetwork",
                        "EquipmentContainer", "GeneratingUnit", "GeographicalRegion", "IEC61970CIMVersion",
                        "Line", "LoadBreakSwitch", "LoadResponseCharacteristic", "Location",
                        "Model", "OperationalLimitSet", "PerLengthSequenceImpedance", "PositionPoint",
                        "PowerTransformer", "PowerTransformerEnd", "PSRType", "RatioTapChanger",
                        "RegulatingControl", "Season", "SeriesCompensator", "ShuntCompensator",
                        "Substation", "Switch", "SynchronousMachine", "Terminal",
                        "TopologicalNode", "TransformerWinding", "VoltageLevel", "VoltageLimit"]

    def clear(self):
        """
        Clear the circuit
        """
        self.elements = list()
        self.elm_dict = dict()
        self.elements_by_type = dict()

    @staticmethod
    def check_type(xml, class_types, starter='<cim:', ender='</cim:'):
        """
        Checks if we are starting an object of the predefined types
        :param xml: some text
        :param class_types: list of CIM types
        :param starter string to add prior to the class when opening an object
        :param ender string to add prior to a class when closing an object
        :return: start_recording, end_recording, the found type or an empty string if none was found
        """
        # for each type
        for tpe in class_types:

            # if the starter token is found: this is the beginning of an object
            if starter + tpe + ' rdf:ID' in xml:
                return True, False, tpe

            # if the starter token is found: this is the beginning of an object (only in the topology definition)
            elif starter + tpe + ' rdf:about' in xml:
                return True, False, tpe

            # if the ender token is found: this is the end of an object
            elif ender + tpe + '>' in xml:
                return False, True, tpe

        # otherwise, this is neither the beginning nor the end of an object
        return False, False, ""

    def find_references(self, recognised=None):
        """
        Replaces the references of the classes given
        :return:
        """
        # avoid a mutable default argument: the set is added to below and
        # would otherwise be shared between calls
        if recognised is None:
            recognised = set()

        # for every element
        for element in self.elements:

            # for each property in the element
            # for prop in element.properties.keys():
            for prop, ref_code in element.properties.items():

                # if the value of the property is in the object ID references...
if ref_code in self.elm_dict.keys(): # replace the reference by the corresponding object properties obj_idx = self.elm_dict[ref_code] ref_obj = self.elements[obj_idx] # element.properties[prop] = ref_obj # add the element type to the recognised types because it is in the referenced dictionary recognised.add(element.tpe) # A terminal points at an equipment with the property ConductingEquipment # A terminal points at a bus (topological node) with the property TopologicalNode if prop in ['ConductingEquipment', 'TopologicalNode', 'ConnectivityNode']: ref_obj.terminals.append(element) recognised.add(prop) if prop in ['BaseVoltage', 'VoltageLevel']: element.base_voltage.append(ref_obj) recognised.add(prop) if prop in ['EquipmentContainer']: element.containers.append(ref_obj) recognised.add(ref_obj.tpe) # the winding points at the transformer with the property PowerTransformer if ref_obj.tpe == 'PowerTransformer': if prop in ['PowerTransformer']: ref_obj.windings.append(element) recognised.add(prop) recognised.add(ref_obj.tpe) # The tap changer points at the winding with the property TransformerWinding if ref_obj.tpe in ['TransformerWinding', 'PowerTransformerEnd']: if prop in ['TransformerWinding', 'PowerTransformerEnd']: ref_obj.tap_changers.append(element) recognised.add(prop) recognised.add(ref_obj.tpe) # the synchronous generator references 3 types of objects if element.tpe == 'SynchronousMachine': if prop in ['BaseVoltage']: element.base_voltage.append(ref_obj) recognised.add(prop) if prop in ['RegulatingControl']: element.regulating_control.append(ref_obj) recognised.add(prop) if prop in ['GeneratingUnit']: element.generating_unit.append(ref_obj) recognised.add(prop) recognised.add(element.tpe) # a Conform load points at LoadResponseCharacteristic with the property LoadResponse if element.tpe == 'ConformLoad': if prop in ['LoadResponse']: element.load_response_characteristics.append(ref_obj) recognised.add(prop) recognised.add(element.tpe) if element.tpe == 'ACLineSegment': if prop in ['CurrentLimit']: element.current_limit.append(ref_obj) else: pass # print('Not found ', prop, ref) def parse_file(self, file_name, classes_=None): """ Parse CIM file and add all the recognised objects :param file_name: file name or path :return: """ if classes_ is None: classes = self.classes else: classes = classes_ recording = False disabled = False # Read text file line by line with open(file_name, 'r') as file_pointer: for line in file_pointer: if '<!--' in line: disabled = True if not disabled: # determine if the line opens or closes and object # and of which type of the ones pre-specified start_rec, end_rec, tpe = self.check_type(line, classes) if tpe != "": # a recognisable object was found if start_rec: id = index_find(line, '"', '">').replace('#', '') # start recording object if tpe == 'PowerTransformer': element = PowerTransformer(id, tpe) elif tpe == 'ACLineSegment': element = ACLineSegment(id, tpe) elif tpe == 'TransformerWinding': element
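# parse_line() above extracts property/value pairs with index_find() using two
# separator pairs: plain XML text values and rdf:resource references. A short
# usage sketch on hand-written sample lines (illustrative only; real CIM files
# carry namespaces and longer identifiers):

sample_text = '<cim:ACLineSegment.r>0.05</cim:ACLineSegment.r>'
prop = index_find(sample_text, '.', '>').strip()   # 'r'
val = index_find(sample_text, '>', '<')            # '0.05'

sample_ref = '<cim:Terminal.TopologicalNode rdf:resource="#_TN1"/>'
prop_ref = index_find(sample_ref, '.', ' rdf:resource').strip()  # 'TopologicalNode'
val_ref = index_find(sample_ref, 'rdf:resource="', '"')          # '#_TN1' (parse_line strips the '#')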
AFFINN = { "плох": "-5", "неблагоприятн": "-5", "скучн": "-5", "трудн": "-5", "больн": "-5", "зло": "-5", "опасн": "-5", "холодн": "-5", "отвратительн": "-5", "нелегк": "-5", "бедн": "-5", "одинок": "-5", "жесток": "-5", "грустн": "-5", "ужасн": "-5", "вынужден": "-5", "негативн": "-5", "неприятн": "-5", "погибш": "-5", "убит": "-5", "глуп": "-5", "вредн": "-5", "неправильн": "-5", "жалк": "-5", "несчастн": "-5", "печальн": "-5", "устал": "-5", "пассивн": "-5", "непонятн": "-5", "нервн": "-5", "груб": "-5", "стыдн": "-5", "ненужн": "-5", "виновн": "-5", "равнодушн": "-5", "криминальн": "-5", "жутк": "-5", "тесн": "-5", "противн": "-5", "преступн": "-5", "дурн": "-5", "агрессивн": "-5", "жирн": "-5", "разбит": "-5", "мрачн": "-5", "обидн": "-5", "бесполезн": "-5", "трагическ": "-5", "нагл": "-5", "ленив": "-5", "нищ": "-5", "дисгармоничн": "-5", "банальн": "-5", "досадн": "-5", "фальшив": "-5", "нелегальн": "-5", "неверн": "-5", "адск": "-5", "чудовищн": "-5", "коварн": "-5", "сомнительн": "-5", "неудобн": "-5", "недешев": "-5", "обречен": "-5", "уныл": "-5", "тревожн": "-5", "проклят": "-5", "враждебн": "-5", "террористическ": "-5", "циничн": "-5", "принудительн": "-5", "несправедлив": "-5", "болезнен": "-5", "эгоистичн": "-5", "шумн": "-5", "фашистск": "-5", "невыносим": "-5", "напрасн": "-5", "зависим": "-5", "коррупцион": "-5", "подозрительн": "-5", "неустойчив": "-5", "дурацк": "-5", "непредсказуем": "-5", "экстремистск": "-5", "опрометчив": "-5", "непутев": "-5", "забит": "-5", "лжив": "-5", "мучительн": "-5", "разрушительн": "-5", "продажн": "-5", "жадн": "-5", "горел": "-5", "катастрофическ": "-5", "неадекватн": "-5", "конфликтн": "-5", "безответствен": "-5", "мерзк": "-5", "злобн": "-5", "бессильн": "-5", "убог": "-5", "нацистск": "-5", "ядовит": "-5", "провальн": "-5", "скверн": "-5", "неловк": "-5", "неэффективн": "-5", "смутн": "-5", "измучен": "-5", "недостойн": "-5", "гадк": "-5", "примитивн": "-5", "воровск": "-5", "абсурдн": "-5", "идиотск": "-5", "нежелательн": "-5", "депрессивн": "-5", "несогласн": "-5", "нецензурн": "-5", "поддельн": "-5", "хренов": "-5", "отстал": "-5", "вредоносн": "-5", "неудовлетворен": "-5", "высокомерн": "-5", "необдума": "-5", "безнаказа": "-5", "уродлив": "-5", "пасмурн": "-5", "беззащитн": "-5", "неспособн": "-5", "оскорбительн": "-5", "дохл": "-5", "зловещ": "-5", "пыльн": "-5", "мафиозн": "-5", "националистическ": "-5", "мутн": "-5", "беспокойн": "-5", "бездарн": "-5", "кризисн": "-5", "труслив": "-5", "неясн": "-5", "неаккуратн": "-5", "яростн": "-5", "порочн": "-5", "невыполним": "-5", "нетрезв": "-5", "вял": "-5", "запута": "-5", "рутин": "-5", "проблематичн": "-5", "авторитарн": "-5", "шоков": "-5", "вражеск": "-5", "штрафн": "-5", "гнил": "-5", "колюч": "-5", "некачествен": "-5", "противоправн": "-5", "нечист": "-5", "нестабильн": "-5", "капризн": "-5", "неправ": "-5", "настырн": "-5", "ненормальн": "-5", "непроверен": "-5", "легкомыслен": "-5", "скользк": "-5", "скуп": "-5", "инфекцион": "-5", "раздражительн": "-5", "гневн": "-5", "рейдерск": "-5", "аморальн": "-5", "ненормативн": "-5", "ненавистн": "-5", "нелюбим": "-5", "антинародн": "-5", "неуклюж": "-5", "ужаса": "-5", "вонюч": "-5", "гнусн": "-5", "стрессов": "-5", "неизлечим": "-5", "наболевш": "-5", "нудн": "-5", "некорректн": "-5", "несовместим": "-5", "утомлен": "-5", "адов": "-5", "несчастлив": "-5", "варварск": "-5", "траурн": "-5", "недобросовестн": "-5", "унизительн": "-5", "недействительн": "-5", "кошмарн": "-5", "хакерск": "-5", "пагубн": "-5", "матерн": "-5", "репрессивн": 
"-5", "бестолков": "-5", "неугодн": "-5", "бесчеловечн": "-5", "безвыходн": "-5", "тяжелобольн": "-5", "кончен": "-5", "непотребн": "-5", "карательн": "-5", "безграмотн": "-5", "сердит": "-5", "невыгодн": "-5", "искажен": "-5", "рассея": "-5", "скудн": "-5", "бред": "-5", "бредов": "-5", "блядск": "-5", "скорбн": "-5", "сран": "-5", "однообразн": "-5", "ожесточен": "-5", "бездумн": "-5", "неразумн": "-5", "фиктивн": "-5", "тосклив": "-5", "плачевн": "-5", "бездушн": "-5", "самовольн": "-5", "трагичн": "-5", "мошеническ": "-5", "взрывоопасн": "-5", "плачущ": "-5", "антисоциальн": "-5", "ебан": "-5", "злокачествен": "-5", "надума": "-5", "безобразн": "-5", "блудн": "-5", "недобр": "-5", "смертоносн": "-5", "назойлив": "-5", "дебильн": "-5", "хулиганск": "-5", "неконтролируем": "-5", "неправомерн": "-5", "деструктивн": "-5", "губительн": "-5", "многогрешн": "-5", "непрофессиональн": "-5", "драматичн": "-5", "гитлеровск": "-5", "неблагодарн": "-5", "лицемерн": "-5", "бессовестн": "-5", "кровожадн": "-5", "немилостивн": "-5", "неутешительн": "-5", "падш": "-5", "вульгарн": "-5", "корыстн": "-5", "омерзительн": "-5", "неграмотн": "-5", "неразрешим": "-5", "хамск": "-5", "совков": "-5", "многострадальн": "-5", "неблагополучн": "-5", "невнятн": "-5", "непристойн": "-5", "лохов": "-5", "противозакон": "-5", "выебудн": "-5", "неумел": "-5", "изнурительн": "-5", "постыдн": "-5", "завистлив": "-5", "никчемн": "-5", "хил": "0", "паршив": "-5", "мятежн": "-5", "безнравствен": "-5", "разъярен": "-5", "строптив": "-5", "антироссийск": "-5", "коряв": "-5", "наркозависим": "-5", "нетерпим": "-5", "сатанинск": "-5", "алчн": "-5", "маниакальн": "-5", "привередлив": "-5", "подставн": "-5", "неуютн": "-5", "убыточн": "-5", "иррациональн": "-5", "чмы": "-5", "ущербн": "-5", "непригодн": "-5", "возмутительн": "-5", "загробн": "-5", "разбойн": "-5", "безрассудн": "-5", "невежествен": "-5", "колк": "-5", "гомофобн": "-5", "бесперспективн": "-5", "неполноцен": "-5", "неправосудн": "-5", "непокорн": "-5", "безрезультатн": "-5", "кощунствен": "-5", "невменя": "-5", "немыт": "-5", "бесконтрольн": "-5", "труднодоступн": "-5", "посредствен": "-5", "нищенск": "-5", "нерадив": "-5", "бесправн": "-5", "ментовск": "-5", "гоним": "-5", "развратн": "-5", "воинств": "-5", "истерическ": "-5", "инфантильн": "-5", "недостоверн": "-5", "бесславн": "-5", "истеричн": "-5", "прискорбн": "-5", "тошн": "-5", "неисполним": "-5", "тухл": "-5", "порнографическ": "-5", "невнимательн": "-5", "расистск": "-5", "своенравн": "-5", "неуда": "-5", "сукин": "-5", "незащищен": "-5", "безмозгл": "-5", "недолговечн": "-5", "краден": "-5", "некомпетентн": "-5", "пуглив": "-5", "мстительн": "-5", "меркантильн": "-5", "нежив": "-5", "черств": "-5", "неточн": "-5", "поминальн": "-5", "пессимистичн": "-5", "ад": "-5", "нелицеприятн": "-5", "вороват": "-5", "пренебрежительн": "-5", "путан": "-5", "бессердечн": "-5", "обма": "-5", "болтлив": "-5", "негодн": "-5", "асоциальн": "-5", "фигов": "-5", "непростительн": "-5", "стремн": "-5", "нахальн": "-5", "язвительн": "-5", "паразитическ": "-5", "неудачлив": "-5", "неумн": "-5", "похотлив": "-5", "запуга": "-5", "несносн": "-5", "шалав": "-5", "бесноват": "-5", "безрадостн": "-5", "некомфортн": "-5", "недоразвит": "-5", "сварлив": "-5", "незавидн": "-5", "бедствен": "-5", "косноязычн": "-5", "застойн": "-5", "бесчувствен": "-5", "бесцеремон": "-5", "чужеродн": "-5", "невоспита": "-5", "пропутинск": "-5", "шизофреническ": "-5", "чопорн": "-5", "непривлекательн": "-5", "злосчастн": "-5", "дегенеративн": "-5", 
"беззастенчив": "-5", "слабоумн": "-5", "презрен": "-5", "вычурн": "-5", "бесчестн": "-5", "стадн": "-5", "криклив": "-5", "клеветническ": "-5", "неуважительн": "-5", "уродск": "-5", "ругательн": "-5", "блудлив": "-5", "дрян": "-5", "злорадн": "-5", "невежлив": "-5", "пофигист": "-5", "отстойн": "-5", "отвратн": "-5", "безысходн": "-5", "враж": "-5", "негума": "-5", "обременительн": "-5", "злодейск": "-5", "самоубийствен": "-5", "злопамятн": "-5", "гибл": "-5", "замызган": "-5", "деспотичн": "-5", "злонамерен": "-5", "недружелюбн": "-5", "малоприятн": "-5", "неопрятн": "-5", "фашиств": "-5", "криминоген": "-5", "тошнотворн": "-5", "чванлив": "-5", "зловредн": "-5", "напыщен": "-5", "никудышн": "-5", "халатн": "-5", "самонадея": "-5", "слащав": "-5", "болезнетворн": "-5", "захудал": "-5", "похабн": "-5", "чахл": "-5", "непорядочн": "-5", "херн": "-5", "удручен": "-5", "неприветлив": "-5", "ублюдочн": "-5", "уничижительн": "-5", "ебал": "-5", "паскудн": "-5", "агресивн": "-5", "неухожен": "-5", "хвастлив": "-5", "беспутн": "-5", "фамильярн": "-5", "неряшлив": "-5", "бестактн": "-5", "нелюдим": "-5", "проигрышн": "-5", "протухш": "-5", "придурковат": "-5", "развязн": "-5", "маразматическ": "-5", "скушн": "-5", "нетолерантн": "-5", "античеловеческ": "-5", "богохульн": "-5", "жлобск": "-5", "стервозн": "-5", "неблаговидн": "-5", "ворчлив": "-5", "мнительн": "-5", "голим": "-5", "неприязнен": "-5", "изуверск": "-5", "показушн": "-5", "братоубийствен": "-5", "мракобесн": "-5", "человеконенавистническ": "-5", "сволочн": "-5", "тупоголов": "-5", "озверел": "-5", "хитрожоп": "-5", "плешив": "-5", "зубодробительн": "-5", "льстив": "-5", "подхалимск": "-5", "фашистк": "-5", "срамн": "-5", "уебищн": "-5", "безалаберн": "-5", "нравоучительн": "-5", "бескультурн": "-5", "антигума": "-5", "дибильн": "-5", "спесив": "-5", "заскорузл": "-5", "визглив": "-5", "засран": "-5", "фригидн": "-5", "ссан": "-5", "неотеса": "-5", "мерзопакостн": "-5", "холопск": "-5", "внагл": "-5", "блевотн": "-5", "обдолбан": "-5", "мерзостн": "-5", "педерастическ": "-5", "драчлив": "-5", "помоечн": "-5", "психова": "-5", "запойн": "-5", "быдляч": "-5", "заподл": "-5", "ужастн": "-5", "говнян": "-5", "наебалов": "-5", "разорительн": "-5", "жуликоват": "-5", "загребущ": "-5", "уебанск": "-5", "плебейск": "-5", "мягкотел": "-5", "бесхребетн": "-5", "фашистськ": "-5", "толстопуз": "-5", "занудлив": "-5", "шкодн": "-5", "порносайтов": "-5", "гаденьк": "-5", "худ": "-5", "суров": "-5", "смертельн": "-5", "недостаточн": "-5", "ранен": "-5", "похорон": "-5", "уязвим": "-5", "скептическ": "-5", "злостн": "-5", "нерешительн": "-5", "безбожн": "-5", "скотск": "-5", "гребан": "-5", "удруча": "-5", "дерьмов": "-5", "неуравновешен": "-5", "истошн": "-5", "безутешн": "-5", "незаслужен": "-5", "еба": "-5", "хуйн": "-5", "злочин": "-5", "бедств": "-5", "устраша": "-5", "хрен": "-5", "неудачн": "-5", "чуж": "-5", "отчая": "-5", "посмертн": "-5", "кос": "-1", "напорист": "-5", "горестн": "-5", "убиен": "-5", "бран": "-5", "дут": "0", "огульн": "-5", "дур": "-5", "дерзновен": "-5", "понур": "-5", "стран": "0", "свиняч": "-5", "задержан": "-5", "ниху": "-5", "матер": "-5", "агонизирова": "-5", "афигет": "-5", "ахренет": "-5", "ахринет": "-5", "ахуест": "-5", "ахуен": "5", "ахует": "-5", "аху": "-5", "бедствова": "-5", "безобразнича": "-5", "бес": "-5", "беснова": "-5", "бесчинствова": "-5", "бичева": "-5", "блева": "-5", "блуд": "-5", "бляяяяяя": "-5", "болет": "-5", "бомжева": "-5", "боя": "-5", "бунтова": "-5", "буха": "-5", "бухтет": "-5", "буян": 
"-5", "ваххаб": "-5", "вдребезг": "-5", "взаш": "-5", "взбаламут": "-5", "взбес": "-5", "влипнут": "-5", "вляпа": "-5", "воева": "-5", "возмуща": "-5", "возмущен": "-5", "возненавидет": "-5", "воня": "-5", "вразнос": "-5", "вреднича": "-5", "втюха": "-5", "втюхива": "-5", "выделыва": "-5", "выеба": "-5", "выебыва": "-5", "выжра": "-5", "вымога": "-5", "выпендр": "-5", "выпорот": "-5", "выруга": "-5", "высеч": "-5", "вышвырнут": "-5", "вяка": "-5", "вякнут": "-5", "гад": "-5", "гаркнут": "-5", "гибнут": "-5", "глум": "-5", "глупет": "-5", "гнева": "-5", "гнев": "-5", "гноб": "-5", "голода": "-5", "гомосеч": "-5", "гоп": "-5", "гор": "-1", "горева":
attributes
        for key in args:
            setattr(self, key, args[key])

    def get(self, dbrow_or_id):
        """get from database for form"""
        # check if id supplied, if so retrieve dbrow
        if type(dbrow_or_id) in [int, str]:
            dbrow = self.tablemodel.query().filter_by(id=dbrow_or_id).one()
        else:
            dbrow = dbrow_or_id
        return self.truedisplay if getattr(dbrow, self.dbfield) else self.falsedisplay

    def sqla_expr(self):
        '''
        get from database when using serverside = True, for use with ColumnDT

        :return: sqlalchemy expression
        '''
        return renderboolean(
            get_dbattr(self.tablemodel, self.dbfield),
            truedisplay=self.truedisplay, falsedisplay=self.falsedisplay
        )

    def set(self, formrow):
        """set to database from form"""
        return formrow[self.formfield] == self.truedisplay

    def options(self):
        return [{'label': self.truedisplay, 'value': self.truedisplay},
                {'label': self.falsedisplay, 'value': self.falsedisplay}]


class DteDbDependent():
    '''
    define dependent options between fields

    * model - which when changed uses options from dependent model
    * modelfield - field within model to drive changes in dependent model - default 'id'
    * depmodel - dependent model
    * depmodelref - field which refers back to model
    * depmodelfield - field in dependent model which are displayed to user
    * depvaluefield - field in dependent model which is used as value for select
      and to retrieve record, passed on Editor interface default 'id' - needs to be a key for model record

    e.g.,
        class Parent(Base):
            __tablename__ = 'parent'
            id = Column(Integer, primary_key=True)
            child_id = Column(Integer, ForeignKey('child.id'))
            child = relationship("Child", backref="parent")

        class Child(Base):
            __tablename__ = 'child'
            name = Column(String)
            id = Column(Integer, primary_key=True)
            parent_id = Column(Integer, ForeignKey('parent.id'))
            parent = relationship('Parent', backref='children', lazy=True)

        TODO: add more detail here -- this is confusing

        children = DteDbDependent(model=Parent,
                                  modelfield='id',
                                  depmodel=Child,
                                  depmodelref='parent',
                                  depmodelfield='name',
                                  depformfield='formfieldname',
                                  depvaluefield='id',
                                  )

        children is callable function which returns tree suitable for
        tables.CrudApi _update.options
    '''

    def __init__(self, **kwargs):
        # the args dict has default values for arguments added by this class
        # caller supplied keyword args are used to update these
        # all arguments are made into attributes for self by the inherited class
        args = dict(model=None,
                    modelfield='id',
                    depmodel=None,
                    depmodelref=None,
                    depmodelfield=None,
                    depformfield=None,
                    depvaluefield='id',
                    )
        args.update(kwargs)

        # some of the args are required
        reqdfields = ['model', 'modelfield', 'depmodel', 'depmodelfield', 'depvaluefield']
        for field in reqdfields:
            if not args[field]:
                raise ParameterError('{} parameters are all required'.format(', '.join(reqdfields)))

        # set arguments as class attributes
        for key in args:
            setattr(self, key, args[key])

    def __call__(self):

        dbvals = self.model.query.all()
        vals = [getattr(v, self.modelfield) for v in dbvals]

        retoptions = {}
        for val in vals:
            retoptions[val] = {'options': {}}

            # make convenient handle
            formoptions = retoptions[val]['options'][self.depformfield] = []

            # retrieve all dependent rows which refer to val
            query = {self.depmodelref: val}
            dbopts = self.depmodel.query.filter_by(**query).all()

            # add these to the options
            for dbopt in dbopts:
                formoptions.append({'label': getattr(dbopt, self.depmodelfield),
                                    'value': getattr(dbopt, self.depvaluefield)})

        return retoptions


class DbCrudApi(CrudApi):
    '''
    This class extends CrudApi.
This extension uses sqlalchemy to read / write to a database Additional parameters for this class: db: database object a la sqlalchemy model: sqlalchemy model for the table to read/write from dbmapping: mapping dict with key for each db field, value is key in form or function(dbentry) formmapping: mapping dict with key for each form row, value is key in db row or function(form) queryparams: dict of query parameters relevant to this table to retrieve table or rows (using filter_by()) queryfilters: list of query criteria relevant to this table to retrieve table or rows (using filter()) dtoptions: datatables options to override / add version_id_col: name of column which contains version id checkrequired: True causes checks of columns with className: 'field_req' **dbmapping** is dict like {'dbattr_n':'formfield_n', 'dbattr_m':f(form), ...} **formmapping** is dict like {'formfield_n':'dbattr_n', 'formfield_m':f(dbrow), ...} if order of operation is important for either of these use OrderedDict **clientcolumns** should be like the following. See https://datatables.net/reference/option/columns and https://editor.datatables.net/reference/option/fields for more information [ { 'data': 'service', 'name': 'service', 'label': 'Service Name' }, { 'data': 'key', 'name': 'key', 'label': 'Key', 'render':'$.fn.dataTable.render.text()' }, { 'data': 'secret', 'name': 'secret', 'label': 'Secret', 'render':'$.fn.dataTable.render.text()' }, { 'data': 'service', 'name': 'service_id', 'label': 'Service Name', 'type': 'selectize', 'options': [{'label':'yes', 'value':1}, {'label':'no', 'value':0}], 'opts': { 'searchField': 'label', 'openOnFocus': False }, '_update' { 'endpoint' : <url endpoint to retrieve options from>, 'on' : <event> 'wrapper' : <wrapper for query response> } {'data': 'name', 'name': 'name', 'label': 'Name', 'type': 'readonly', '_ColumnDT_args' : {'sqla_expr': localuser_invites_alias1.name}, 'aliased': localuser_invites_alias1, 'onclause': localuser_invites_alias1.id == Invite.user_id, }, }, ] * name - describes the column and is used within javascript * data - used on server-client interface and should be used in the formmapping key and dbmapping value * label - used for the DataTable table column and the Editor form label * optional render key is eval'd into javascript * id - is specified by idSrc, and should be in the mapping function but not columns additionally the update option can be used to _update the options for any type = 'select', 'selectize' * _update - dict with following keys * endpoint - url endpoint to retrieve new options * on - event which triggers update. supported events are * 'open' - triggered when form opens (actually when field is focused) * 'change' - triggered when field changes - use wrapper to indicate what field(s) are updated * wrapper - dict which is wrapped around query response. 
value '_response_' indicates where query response should be placed * _treatment - dict with (only) one of following keys - note this causes override of dbmapping and formmapping configuration * boolean - {DteDbBool keyword parameters} * relationship - {'editable' : { 'api':<DbCrudApi()> }} 'editable' is set only if it is desired to bring up a form to edit the underlying model row 'optionspicker' : <DteDbOptionsPickerBase()> based class instance OR DteDbRelationship keyword parameters (backwards compatibility) * _ColumnDT_args - dict with keyword arguments passed to ColumnDT for serverside processing * aliased - class resulting from sqlalchemy.orm.aliased() * onclause - expression used to limit outer join, can go with aliased see https://stackoverflow.com/a/46810551/799921 **serverside** - if present table will be displayed through ajax get calls **version_id_col** - if present edits to this table are protected using optimistic concurrency control * https://docs.sqlalchemy.org/en/13/orm/versioning.html * see https://en.wikipedia.org/wiki/Optimistic_concurrency_control * also https://martinfowler.com/eaaCatalog/optimisticOfflineLock.html * this column is automatically added to dbmapping, formmapping and clientcolumns * e.g., for version_id_col='version_id', database model for this table should have code like ``` version_id = Column(Integer, nullable=False) __mapper_args__ = { 'version_id_col' : version_id } ``` ''' # class specific imports here so users of other classes do not need to install def __init__(self, **kwargs): if debug: current_app.logger.debug('DbCrudApi.__init__()') # the args dict has default values for arguments added by this derived class # caller supplied keyword args are used to update these # all arguments are made into attributes for self by the inherited class args = dict(db=None, model=None, dbmapping={}, formmapping={}, version_id_col=None, serverside=False, # duplicated here and in CrudApi because test before super() called queryparams={}, queryfilters=[], dtoptions={}, filtercoloptions=[], checkrequired=None, # TODO: should this be made more general? Maybe a function to check col ) args.update(kwargs) # make sure '_treatment', '_unique' and '_ColumnDT_args' column options are removed before invoking DataTables and Editor args['filtercoloptions'] += ['_treatment', '_unique', '_ColumnDT_args'] # make copy of dbmapping and formmapping # Need to do this because we update the mapping with functions. 
# view class gets reinstantiated when page painted, so we'll need to make sure we # don't corrupt the original data self.formmapping = deepcopy(args['formmapping']) self.dbmapping = deepcopy(args['dbmapping']) # keep track of columns which must be unique in the database self.uniquecols = [] # update parameters if version_col_id is specified version_id_col = args['version_id_col'] if version_id_col: self.occupdate = False self.formmapping[version_id_col] = version_id_col self.dbmapping[version_id_col] = lambda form: int(form['version_id']) if form['version_id'] else 0 versioncol = { 'name': version_id_col, 'data': version_id_col, 'ed': {'type': 'hidden'}, 'dt': {'visible': False}, } # this code comes through multiple times so need to prevent from being added twice # should consider alternative of deepcopy() like mapping arguments if version_id_col not in [c['data'] for c in args['clientcolumns']]: args['clientcolumns'].append(versioncol) # for serverside processing, self.servercolumns is built up from column data if args['serverside']: # keep track of needed columns, args, and joins
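# Per the docstring above, dbmapping/formmapping values may be either an
# attribute name or a callable. A minimal sketch of how a form row could be
# built from a db row under that convention (hypothetical helper, shown for
# clarity; DbCrudApi's real code path also handles _treatment overrides):

def apply_formmapping(formmapping, dbrow):
    """Build {formfield: value} from a db row using names or callables."""
    formrow = {}
    for formfield, dbattr in formmapping.items():
        if callable(dbattr):
            formrow[formfield] = dbattr(dbrow)       # value computed as f(dbrow)
        else:
            formrow[formfield] = getattr(dbrow, dbattr)
    return formrow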
:obj:`Conclusion`: conclusions for model """ if '__type' in kwargs: __type = kwargs.pop('__type') return self.conclusions.get(__type=__type, **kwargs) def get_references(self, __type=None, **kwargs): """ Get all references from model and children Args: __type (:obj:`types.TypeType` or :obj:`tuple` of :obj:`types.TypeType`): subclass(es) of :obj:`Model` kwargs (:obj:`dict` of :obj:`str` --> :obj:`object`): dictionary of attribute name/value pairs to find matching objects Returns: :obj:`list` of :obj:`Reference`: references """ if '__type' in kwargs: __type = kwargs.pop('__type') return self.references.get(__type=__type, **kwargs) def get_authors(self, __type=None, **kwargs): """ Get all authors from model and children Args: __type (:obj:`types.TypeType` or :obj:`tuple` of :obj:`types.TypeType`): subclass(es) of :obj:`Model` kwargs (:obj:`dict` of :obj:`str` --> :obj:`object`): dictionary of attribute name/value pairs to find matching objects Returns: :obj:`list` of :obj:`Author`: authors """ if '__type' in kwargs: __type = kwargs.pop('__type') return self.authors.get(__type=__type, **kwargs) def get_changes(self, __type=None, **kwargs): """ Get all changes from model and children Args: __type (:obj:`types.TypeType` or :obj:`tuple` of :obj:`types.TypeType`): subclass(es) of :obj:`Model` kwargs (:obj:`dict` of :obj:`str` --> :obj:`object`): dictionary of attribute name/value pairs to find matching objects Returns: :obj:`list` of :obj:`Change`: changes """ if '__type' in kwargs: __type = kwargs.pop('__type') return self.changes.get(__type=__type, **kwargs) def get_components(self, __type=None, **kwargs): """ Find model component of `type` with `id` Args: __type (:obj:`types.TypeType` or :obj:`tuple` of :obj:`types.TypeType`): subclass(es) of :obj:`Model` kwargs (:obj:`dict` of :obj:`str` --> :obj:`object`): dictionary of attribute name/value pairs to find matching objects Returns: :obj:`obj_tables.Model`: component with `id`, or `None` if there is no component with `id`=`id` """ if '__type' in kwargs: __type = kwargs.pop('__type') if __type: type_names = [stringcase.snakecase(__type.__name__) + 's'] else: type_names = [ 'submodels', 'compartments', 'species_types', 'species', 'distribution_init_concentrations', 'observables', 'functions', 'dfba_objs', 'reactions', 'rate_laws', 'dfba_obj_reactions', 'dfba_obj_species', 'stop_conditions', 'parameters', 'observations', 'observation_sets', 'evidence', 'conclusions', 'references', 'authors', 'changes', ] components = [] for type_name in type_names: get_func = getattr(self, 'get_' + type_name) components.extend(get_func(__type=__type, **kwargs)) return components def merge_attrs(self, other, other_objs_in_self, self_objs_in_other): """ Merge attributes of two objects Args: other (:obj:`Model`): other model other_objs_in_self (:obj:`dict`): dictionary that maps instances of objects in another model to objects in a model self_objs_in_other (:obj:`dict`): dictionary that maps instances of objects in a model to objects in another model """ self.Meta.attributes['comments'].merge(self, other, other_objs_in_self, self_objs_in_other) for attr in self.Meta.attributes.values(): if isinstance(attr, obj_tables.RelatedAttribute): attr.merge(self, other, other_objs_in_self, self_objs_in_other) def export_to_sbml(self, sbml_model): """ Add this metadata about this model to a SBML model. 
Args: sbml_model (:obj:`libsbml.Model`): SBML model Returns: :obj:`libsbml.Model`: SBML model """ if self.submodels: return call_libsbml(sbml_model.setIdAttribute, self.gen_sbml_id()) call_libsbml(sbml_model.setName, self.name) LibSbmlInterface.set_commments(self, sbml_model) return sbml_model def export_relations_to_sbml(self, sbml_model, sbml): """ Add relationships to/from object to SBML model. Args: sbml_model (:obj:`libsbml.Model`): SBML model sbml (:obj:`libsbml.Model`): SBML model """ if self.submodels: return annots = [] annots.extend(['version', 'url', 'branch', 'revision', 'wc_lang_version', 'identifiers', 'updated', 'created']) if self.taxon: annots.extend(['taxon.id', 'taxon.name', 'taxon.rank', 'taxon.identifiers', 'taxon.comments']) if self.env: annots.extend(['env.id', 'env.name', 'env.temp', 'env.temp_units', 'env.identifiers', 'env.comments']) xml_annotation = '<annotation><wcLang:annotation>' \ + LibSbmlInterface.gen_annotations(self, LibSbmlInterface.gen_nested_attr_paths(annots), sbml) \ + LibSbmlInterface.gen_authors_annotation(self) \ + '</wcLang:annotation></annotation>' call_libsbml(sbml.setAnnotation, xml_annotation) def import_from_sbml(self, sbml): """ Load from SBML model Args: sbml (:obj:`libsbml.Model`): SBML model """ self.id = self.parse_sbml_id(call_libsbml(sbml.getIdAttribute)) self.name = call_libsbml(sbml.getName) LibSbmlInterface.get_commments(self, sbml) def import_relations_from_sbml(self, sbml, objs): """ Load relationships from SBML model Args: sbml (:obj:`libsbml.Model`): SBML model objs (:obj:`dict`): dictionary that maps WC-Lang types to dictionaries that map the ids of WC-Lang objects to WC-Lang objects """ parsed_annots = LibSbmlInterface.parse_annotations(sbml) annots = [] # identifiers annots.extend(['version', 'url', 'branch', 'revision', 'wc_lang_version', 'identifiers', 'updated', 'created']) if 'taxon.id' in parsed_annots: self.taxon = Taxon() annots.extend(['taxon.id', 'taxon.name', 'taxon.rank', 'taxon.identifiers', 'taxon.comments']) if 'env.id' in parsed_annots: self.env = Environment() annots.extend(['env.id', 'env.name', 'env.temp', 'env.temp_units', 'env.identifiers', 'env.comments']) LibSbmlInterface.get_annotations(self, LibSbmlInterface.gen_nested_attr_paths(annots), sbml, objs) # authors LibSbmlInterface.get_authors_annotation(self, sbml, objs) class Taxon(obj_tables.Model, SbmlModelMixin): """ Biological taxon (e.g. family, genus, species, strain, etc.) 
Attributes: id (:obj:`str`): unique identifier equal to 'taxon' name (:obj:`str`): name model (:obj:`Model`): model rank (:obj:`TaxonRank`): rank identifiers (:obj:`list` of :obj:`Identifier`): identifiers comments (:obj:`str`): comments references (:obj:`list` of :obj:`Reference`): references """ id = RegexAttribute(pattern=r'^taxon$', primary=True, unique=True) name = StringAttribute() model = OneToOneAttribute(Model, related_name='taxon') rank = EnumAttribute(TaxonRank, default=TaxonRank.species) identifiers = IdentifierOneToManyAttribute(related_name='taxon') comments = CommentAttribute() references = OneToManyAttribute('Reference', related_name='taxon') class Meta(obj_tables.Model.Meta): attribute_order = ('id', 'name', 'rank', 'identifiers', 'comments', 'references') table_format = TableFormat.column children = { 'submodel': ('identifiers', 'references'), 'core_model': ('identifiers', 'references'), } child_attrs = { 'sbml': ('id', 'name', 'model', 'rank', 'identifiers', 'comments'), 'wc_sim': (), } class Environment(obj_tables.Model, SbmlModelMixin): """ Environment Attributes: id (:obj:`str`): unique identifier equal to 'env' name (:obj:`str`): name model (:obj:`Model`): model temp (:obj:`float`): temperature temp_units (:obj:`unit_registry.Unit`): temperature units identifiers (:obj:`list` of :obj:`Identifier`): identifiers comments (:obj:`str`): comments references (:obj:`list` of :obj:`Reference`): references """ id = RegexAttribute(pattern=r'^env$', primary=True, unique=True) name = StringAttribute() model = OneToOneAttribute(Model, related_name='env') temp = FloatAttribute(verbose_name='Temperature') temp_units = UnitAttribute(unit_registry, choices=(unit_registry.parse_units('celsius'),), default=unit_registry.parse_units('celsius'), verbose_name='Temperature units') identifiers = IdentifierOneToManyAttribute(related_name='env') comments = CommentAttribute() references = OneToManyAttribute('Reference', related_name='env') class Meta(obj_tables.Model.Meta): attribute_order = ('id', 'name', 'temp', 'temp_units', 'identifiers', 'comments', 'references') table_format = TableFormat.column children = { 'submodel': ('identifiers', 'references'), 'core_model': ('identifiers', 'references'), } child_attrs = { 'sbml': ('id', 'name', 'model', 'temp', 'temp_units', 'identifiers', 'comments'), 'wc_sim': (), } class Submodel(obj_tables.Model, SbmlModelMixin): """ Submodel Attributes: id (:obj:`str`): unique identifier name (:obj:`str`): name model (:obj:`Model`): model framework (:obj:`pronto.Term`): modeling framework (e.g. 
dynamic flux balance analysis) identifiers (:obj:`list` of :obj:`Identifier`): identifiers conclusions (:obj:`list` of :obj:`Conclusion`): conclusions comments (:obj:`str`): comments references (:obj:`list` of :obj:`Reference`): references Related attributes: * reactions (:obj:`list` of :obj:`Reaction`): reactions * dfba_obj (:obj:`DfbaObjective`): objective function for a dFBA submodel; if not initialized, then `dfba_obj_reaction` is used as the objective function * dfba_obj_reactions (:obj:`list` of :obj:`DfbaObjReaction`): the growth reaction for a dFBA submodel * changes (:obj:`list` of :obj:`Change`): changes """ id = SlugAttribute() name = StringAttribute() model = ManyToOneAttribute(Model, related_name='submodels', related_manager=SubmodelsToModelRelatedManager) framework = OntoTermAttribute(onto, namespace='WC', terms=onto['WC:modeling_framework'].subclasses(), default=onto['WC:stochastic_simulation_algorithm'], none=False) identifiers = IdentifierManyToManyAttribute(related_name='submodels') evidence = EvidenceManyToManyAttribute('Evidence', related_name='submodels') conclusions = ManyToManyAttribute('Conclusion', related_name='submodels') comments = CommentAttribute() references = ManyToManyAttribute('Reference', related_name='submodels') class Meta(obj_tables.Model.Meta): attribute_order = ('id', 'name', 'framework', 'identifiers', 'evidence', 'conclusions', 'comments', 'references') indexed_attrs_tuples = (('id',), ) merge = obj_tables.ModelMerge.append children = { 'submodel': ('model', 'reactions', 'dfba_obj', 'dfba_obj_reactions', 'identifiers', 'evidence', 'conclusions', 'references', 'changes'), } child_attrs = { 'sbml': ('id', 'name', 'model', 'framework', 'identifiers', 'comments'), 'wc_sim': ('id', 'model', 'framework'), } def validate(self): """ Determine if the submodel is valid * dFBA submodel has an objective .. todo :: Check that the submodel uses water consistently -- either in all compartments or in none Returns: :obj:`InvalidObject` or None: `None` if the object is valid, otherwise return a list of errors as an instance of `InvalidObject` """ invalid_obj = super(Submodel, self).validate() if invalid_obj: errors = invalid_obj.attributes else: errors = [] if are_terms_equivalent(self.framework, onto['WC:dynamic_flux_balance_analysis']): if not self.dfba_obj: errors.append(InvalidAttribute(self.Meta.related_attributes['dfba_obj'], ['dFBA submodel must have an objective'])) if errors: return InvalidObject(self, errors) return None def get_children(self, kind=None, __type=None, recursive=True, __include_stop_conditions=True, **kwargs): """ Get a kind of children. If :obj:`kind` is :obj:`None`, children are defined to be the values of the related attributes defined in each class. 
Args: kind (:obj:`str`, optional): kind of children to get __type (:obj:`types.TypeType` or :obj:`tuple` of :obj:`types.TypeType`): subclass(es) of :obj:`Model` recursive (:obj:`bool`, optional): if :obj:`True`, get children recursively kwargs (:obj:`dict` of :obj:`str` --> :obj:`object`): dictionary of attribute name/value pairs Returns: :obj:`list` of :obj:`Model`: children """ if '__type' in kwargs: __type = kwargs.pop('__type') if '__include_stop_conditions' in kwargs: __include_stop_conditions = kwargs.pop('__include_stop_conditions') children = self.get_immediate_children(kind=kind, __include_stop_conditions=__include_stop_conditions) # get recursive children if recursive: objs_to_explore = children children = set(children) while objs_to_explore: obj_to_explore = objs_to_explore.pop() for child in obj_to_explore.get_immediate_children(kind=kind): if child not in children: children.add(child) objs_to_explore.append(child) children = list(children) # filter by type/attributes matches = [] for child in children: if child.has_attr_vals(__type=__type, **kwargs): matches.append(child) children = matches # return children return children def get_immediate_children(self, kind=None, __type=None, __include_stop_conditions=True, **kwargs): """ Get a kind of children. If :obj:`kind` is :obj:`None`, children are defined to be the values of the related attributes defined in each class. Args: kind (:obj:`str`, optional): kind of children to get __type (:obj:`types.TypeType` or :obj:`tuple` of :obj:`types.TypeType`): subclass(es) of :obj:`Model` kwargs (:obj:`dict` of :obj:`str` --> :obj:`object`): dictionary of attribute name/value pairs Returns: :obj:`list` of :obj:`Model`: children """ if '__type' in kwargs: __type = kwargs.pop('__type') if '__include_stop_conditions' in kwargs: __include_stop_conditions = kwargs.pop('__include_stop_conditions') immediate_children = super(Submodel, self).get_immediate_children(kind=kind) if kind == 'submodel' and __include_stop_conditions: all_children = set(self.get_children(kind=kind, __include_stop_conditions=False)) for stop_condition in self.model.stop_conditions: stop_condition_children = stop_condition.get_children(kind='submodel') stop_condition_species =