id
stringlengths
1
265
text
stringlengths
6
5.19M
dataset_id
stringclasses
7 values
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/full/lang/ms.js
/* Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved. For licensing, see LICENSE.md or https://ckeditor.com/license */ CKEDITOR.lang['ms']={"editor":"Rich Text Editor","editorPanel":"Rich Text Editor panel","common":{"editorHelp":"Press ALT 0 for help","browseServer":"Browse Server","url":"URL","protocol":"Protokol","upload":"Muat Naik","uploadSubmit":"Hantar ke Server","image":"Gambar","flash":"Flash","form":"Borang","checkbox":"Checkbox","radio":"Butang Radio","textField":"Text Field","textarea":"Textarea","hiddenField":"Field Tersembunyi","button":"Butang","select":"Field Pilihan","imageButton":"Butang Bergambar","notSet":"<tidak di set>","id":"Id","name":"Nama","langDir":"Arah Tulisan","langDirLtr":"Kiri ke Kanan (LTR)","langDirRtl":"Kanan ke Kiri (RTL)","langCode":"Kod Bahasa","longDescr":"Butiran Panjang URL","cssClass":"Kelas-kelas Stylesheet","advisoryTitle":"Tajuk Makluman","cssStyle":"Stail","ok":"OK","cancel":"Batal","close":"Tutup","preview":"Prebiu","resize":"Resize","generalTab":"Umum","advancedTab":"Advanced","validateNumberFailed":"This value is not a number.","confirmNewPage":"Any unsaved changes to this content will be lost. Are you sure you want to load new page?","confirmCancel":"You have changed some options. 
Are you sure you want to close the dialog window?","options":"Options","target":"Sasaran","targetNew":"New Window (_blank)","targetTop":"Topmost Window (_top)","targetSelf":"Same Window (_self)","targetParent":"Parent Window (_parent)","langDirLTR":"Kiri ke Kanan (LTR)","langDirRTL":"Kanan ke Kiri (RTL)","styles":"Stail","cssClasses":"Kelas-kelas Stylesheet","width":"Lebar","height":"Tinggi","align":"Jajaran","left":"Kiri","right":"Kanan","center":"Tengah","justify":"Jajaran Blok","alignLeft":"Jajaran Kiri","alignRight":"Jajaran Kanan","alignCenter":"Align Center","alignTop":"Atas","alignMiddle":"Pertengahan","alignBottom":"Bawah","alignNone":"None","invalidValue":"Nilai tidak sah.","invalidHeight":"Height must be a number.","invalidWidth":"Width must be a number.","invalidLength":"Value specified for the \"%1\" field must be a positive number with or without a valid measurement unit (%2).","invalidCssLength":"Value specified for the \"%1\" field must be a positive number with or without a valid CSS measurement unit (px, %, in, cm, mm, em, ex, pt, or pc).","invalidHtmlLength":"Value specified for the \"%1\" field must be a positive number with or without a valid HTML measurement unit (px or %).","invalidInlineStyle":"Value specified for the inline style must consist of one or more tuples with the format of \"name : value\", separated by semi-colons.","cssLengthTooltip":"Enter a number for a value in pixels or a number with a valid CSS unit (px, %, in, cm, mm, em, ex, pt, or pc).","unavailable":"%1<span class=\"cke_accessibility\">, 
unavailable</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"Space","35":"End","36":"Home","46":"Delete","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Keyboard shortcut","optionDefault":"Default"},"about":{"copy":"Copyright &copy; $1. All rights reserved.","dlgTitle":"About CKEditor 4","moreInfo":"For licensing information please visit our web site:"},"basicstyles":{"bold":"Bold","italic":"Italic","strike":"Strike Through","subscript":"Subscript","superscript":"Superscript","underline":"Underline"},"bidi":{"ltr":"Text direction from left to right","rtl":"Text direction from right to left"},"blockquote":{"toolbar":"Block Quote"},"notification":{"closed":"Notification closed."},"toolbar":{"toolbarCollapse":"Collapse Toolbar","toolbarExpand":"Expand Toolbar","toolbarGroups":{"document":"Document","clipboard":"Clipboard/Undo","editing":"Editing","forms":"Forms","basicstyles":"Basic Styles","paragraph":"Paragraph","links":"Links","insert":"Insert","styles":"Styles","colors":"Colors","tools":"Tools"},"toolbars":"Editor toolbars"},"clipboard":{"copy":"Salin","copyError":"Keselamatan perisian browser anda tidak membenarkan operasi salinan text/imej. Sila gunakan papan kekunci (Ctrl/Cmd+C).","cut":"Potong","cutError":"Keselamatan perisian browser anda tidak membenarkan operasi suntingan text/imej. Sila gunakan papan kekunci (Ctrl/Cmd+X).","paste":"Tampal","pasteNotification":"Press %1 to paste. 
Your browser doesn‘t support pasting with the toolbar button or context menu option.","pasteArea":"Paste Area","pasteMsg":"Paste your content inside the area below and press OK."},"colorbutton":{"auto":"Otomatik","bgColorTitle":"Warna Latarbelakang","colors":{"000":"Black","800000":"Maroon","8B4513":"Saddle Brown","2F4F4F":"Dark Slate Gray","008080":"Teal","000080":"Navy","4B0082":"Indigo","696969":"Dark Gray","B22222":"Fire Brick","A52A2A":"Brown","DAA520":"Golden Rod","006400":"Dark Green","40E0D0":"Turquoise","0000CD":"Medium Blue","800080":"Purple","808080":"Gray","F00":"Red","FF8C00":"Dark Orange","FFD700":"Gold","008000":"Green","0FF":"Cyan","00F":"Blue","EE82EE":"Violet","A9A9A9":"Dim Gray","FFA07A":"Light Salmon","FFA500":"Orange","FFFF00":"Yellow","00FF00":"Lime","AFEEEE":"Pale Turquoise","ADD8E6":"Light Blue","DDA0DD":"Plum","D3D3D3":"Light Grey","FFF0F5":"Lavender Blush","FAEBD7":"Antique White","FFFFE0":"Light Yellow","F0FFF0":"Honeydew","F0FFFF":"Azure","F0F8FF":"Alice Blue","E6E6FA":"Lavender","FFF":"White","1ABC9C":"Strong Cyan","2ECC71":"Emerald","3498DB":"Bright Blue","9B59B6":"Amethyst","4E5F70":"Grayish Blue","F1C40F":"Vivid Yellow","16A085":"Dark Cyan","27AE60":"Dark Emerald","2980B9":"Strong Blue","8E44AD":"Dark Violet","2C3E50":"Desaturated Blue","F39C12":"Orange","E67E22":"Carrot","E74C3C":"Pale Red","ECF0F1":"Bright Silver","95A5A6":"Light Grayish Cyan","DDD":"Light Gray","D35400":"Pumpkin","C0392B":"Strong Red","BDC3C7":"Silver","7F8C8D":"Grayish Cyan","999":"Dark Gray"},"more":"Warna lain-lain...","panelTitle":"Colors","textColorTitle":"Warna Text"},"colordialog":{"clear":"Clear","highlight":"Highlight","options":"Color Options","selected":"Selected Color","title":"Select color"},"templates":{"button":"Templat","emptyListMsg":"(Tiada Templat Disimpan)","insertOption":"Replace actual contents","options":"Template Options","selectPromptMsg":"Sila pilih templat untuk dibuka oleh editor<br>(kandungan sebenar akan hilang):","title":"Templat 
Kandungan"},"contextmenu":{"options":"Context Menu Options"},"copyformatting":{"label":"Copy Formatting","notification":{"copied":"Formatting copied","applied":"Formatting applied","canceled":"Formatting canceled","failed":"Formatting failed. You cannot apply styles without copying them first."}},"div":{"IdInputLabel":"Id","advisoryTitleInputLabel":"Advisory Title","cssClassInputLabel":"Stylesheet Classes","edit":"Edit Div","inlineStyleInputLabel":"Inline Style","langDirLTRLabel":"Left to Right (LTR)","langDirLabel":"Language Direction","langDirRTLLabel":"Right to Left (RTL)","languageCodeInputLabel":" Language Code","remove":"Remove Div","styleSelectLabel":"Style","title":"Create Div Container","toolbar":"Create Div Container"},"elementspath":{"eleLabel":"Elements path","eleTitle":"%1 element"},"filetools":{"loadError":"Error occurred during file read.","networkError":"Network error occurred during file upload.","httpError404":"HTTP error occurred during file upload (404: File not found).","httpError403":"HTTP error occurred during file upload (403: Forbidden).","httpError":"HTTP error occurred during file upload (error status: %1).","noUrlError":"Upload URL is not defined.","responseError":"Incorrect server response."},"find":{"find":"Cari","findOptions":"Find Options","findWhat":"Perkataan yang dicari:","matchCase":"Padanan case huruf","matchCyclic":"Match cyclic","matchWord":"Padana Keseluruhan perkataan","notFoundMsg":"Text yang dicari tidak dijumpai.","replace":"Ganti","replaceAll":"Ganti semua","replaceSuccessMsg":"%1 occurrence(s) replaced.","replaceWith":"Diganti dengan:","title":"Find and Replace"},"fakeobjects":{"anchor":"Anchor","flash":"Flash Animation","hiddenfield":"Hidden Field","iframe":"IFrame","unknown":"Unknown Object"},"flash":{"access":"Script Access","accessAlways":"Always","accessNever":"Never","accessSameDomain":"Same domain","alignAbsBottom":"Bawah Mutlak","alignAbsMiddle":"Pertengahan Mutlak","alignBaseline":"Garis 
Dasar","alignTextTop":"Atas Text","bgcolor":"Warna Latarbelakang","chkFull":"Allow Fullscreen","chkLoop":"Loop","chkMenu":"Enable Flash Menu","chkPlay":"Auto Play","flashvars":"Variables for Flash","hSpace":"Ruang Melintang","properties":"Flash Properties","propertiesTab":"Properties","quality":"Quality","qualityAutoHigh":"Auto High","qualityAutoLow":"Auto Low","qualityBest":"Best","qualityHigh":"High","qualityLow":"Low","qualityMedium":"Medium","scale":"Scale","scaleAll":"Show all","scaleFit":"Exact Fit","scaleNoBorder":"No Border","title":"Flash Properties","vSpace":"Ruang Menegak","validateHSpace":"HSpace must be a number.","validateSrc":"Sila taip sambungan URL","validateVSpace":"VSpace must be a number.","windowMode":"Window mode","windowModeOpaque":"Opaque","windowModeTransparent":"Transparent","windowModeWindow":"Window"},"font":{"fontSize":{"label":"Saiz","voiceLabel":"Font Size","panelTitle":"Saiz"},"label":"Font","panelTitle":"Font","voiceLabel":"Font"},"forms":{"button":{"title":"Ciri-ciri Butang","text":"Teks (Nilai)","type":"Jenis","typeBtn":"Button","typeSbm":"Submit","typeRst":"Reset"},"checkboxAndRadio":{"checkboxTitle":"Ciri-ciri Checkbox","radioTitle":"Ciri-ciri Butang Radio","value":"Nilai","selected":"Dipilih","required":"Required"},"form":{"title":"Ciri-ciri Borang","menu":"Ciri-ciri Borang","action":"Tindakan borang","method":"Cara borang dihantar","encoding":"Encoding"},"hidden":{"title":"Ciri-ciri Field Tersembunyi","name":"Nama","value":"Nilai"},"select":{"title":"Ciri-ciri Selection Field","selectInfo":"Select Info","opAvail":"Pilihan sediada","value":"Nilai","size":"Saiz","lines":"garisan","chkMulti":"Benarkan pilihan pelbagai","required":"Required","opText":"Teks","opValue":"Nilai","btnAdd":"Tambah Pilihan","btnModify":"Ubah Pilihan","btnUp":"Naik ke atas","btnDown":"Turun ke bawah","btnSetValue":"Set sebagai nilai terpilih","btnDelete":"Padam"},"textarea":{"title":"Ciri-ciri 
Textarea","cols":"Lajur","rows":"Baris"},"textfield":{"title":"Ciri-ciri Text Field","name":"Nama","value":"Nilai","charWidth":"Lebar isian","maxChars":"Isian Maksimum","required":"Required","type":"Jenis","typeText":"Teks","typePass":"Kata Laluan","typeEmail":"Email","typeSearch":"Search","typeTel":"Telephone Number","typeUrl":"URL"}},"format":{"label":"Format","panelTitle":"Format","tag_address":"Alamat","tag_div":"Perenggan (DIV)","tag_h1":"Heading 1","tag_h2":"Heading 2","tag_h3":"Heading 3","tag_h4":"Heading 4","tag_h5":"Heading 5","tag_h6":"Heading 6","tag_p":"Normal","tag_pre":"Telah Diformat"},"horizontalrule":{"toolbar":"Masukkan Garisan Membujur"},"iframe":{"border":"Show frame border","noUrl":"Please type the iframe URL","scrolling":"Enable scrollbars","title":"IFrame Properties","toolbar":"IFrame"},"image":{"alt":"Text Alternatif","border":"Border","btnUpload":"Hantar ke Server","button2Img":"Do you want to transform the selected image button on a simple image?","hSpace":"Ruang Melintang","img2Button":"Do you want to transform the selected image on a image button?","infoTab":"Info Imej","linkTab":"Sambungan","lockRatio":"Tetapkan Nisbah","menu":"Ciri-ciri Imej","resetSize":"Saiz Set Semula","title":"Ciri-ciri Imej","titleButton":"Ciri-ciri Butang Bergambar","upload":"Muat Naik","urlMissing":"Image source URL is missing.","vSpace":"Ruang Menegak","validateBorder":"Border must be a whole number.","validateHSpace":"HSpace must be a whole number.","validateVSpace":"VSpace must be a whole number."},"indent":{"indent":"Tambahkan Inden","outdent":"Kurangkan Inden"},"smiley":{"options":"Smiley Options","title":"Masukkan Smiley","toolbar":"Smiley"},"language":{"button":"Set language","remove":"Remove language"},"link":{"acccessKey":"Kunci Akses","advanced":"Advanced","advisoryContentType":"Jenis Kandungan Makluman","advisoryTitle":"Tajuk Makluman","anchor":{"toolbar":"Masukkan/Sunting Pautan","menu":"Ciri-ciri Pautan","title":"Ciri-ciri Pautan","name":"Nama 
Pautan","errorName":"Sila taip nama pautan","remove":"Remove Anchor"},"anchorId":"dengan menggunakan ID elemen","anchorName":"dengan menggunakan nama pautan","charset":"Linked Resource Charset","cssClasses":"Kelas-kelas Stylesheet","download":"Force Download","displayText":"Display Text","emailAddress":"Alamat E-Mail","emailBody":"Isi Kandungan Mesej","emailSubject":"Subjek Mesej","id":"Id","info":"Butiran Sambungan","langCode":"Arah Tulisan","langDir":"Arah Tulisan","langDirLTR":"Kiri ke Kanan (LTR)","langDirRTL":"Kanan ke Kiri (RTL)","menu":"Sunting Sambungan","name":"Nama","noAnchors":"(Tiada pautan terdapat dalam dokumen ini)","noEmail":"Sila taip alamat e-mail","noUrl":"Sila taip sambungan URL","noTel":"Please type the phone number","other":"<lain>","phoneNumber":"Phone number","popupDependent":"Bergantungan (Netscape)","popupFeatures":"Ciri Tetingkap Popup","popupFullScreen":"Skrin Penuh (IE)","popupLeft":"Posisi Kiri","popupLocationBar":"Bar Lokasi","popupMenuBar":"Bar Menu","popupResizable":"Resizable","popupScrollBars":"Bar-bar skrol","popupStatusBar":"Bar Status","popupToolbar":"Toolbar","popupTop":"Posisi Atas","rel":"Relationship","selectAnchor":"Sila pilih pautan","styles":"Stail","tabIndex":"Indeks Tab ","target":"Sasaran","targetFrame":"<bingkai>","targetFrameName":"Nama Bingkai Sasaran","targetPopup":"<tetingkap popup>","targetPopupName":"Nama Tetingkap Popup","title":"Sambungan","toAnchor":"Pautan dalam muka surat ini","toEmail":"E-Mail","toUrl":"URL","toPhone":"Phone","toolbar":"Masukkan/Sunting Sambungan","type":"Jenis Sambungan","unlink":"Buang Sambungan","upload":"Muat Naik"},"list":{"bulletedlist":"Senarai tidak bernombor","numberedlist":"Senarai bernombor"},"liststyle":{"bulletedTitle":"Bulleted List Properties","circle":"Circle","decimal":"Decimal (1, 2, 3, etc.)","disc":"Disc","lowerAlpha":"Lower Alpha (a, b, c, d, e, etc.)","lowerRoman":"Lower Roman (i, ii, iii, iv, v, etc.)","none":"None","notset":"<not set>","numberedTitle":"Numbered 
List Properties","square":"Square","start":"Start","type":"Type","upperAlpha":"Upper Alpha (A, B, C, D, E, etc.)","upperRoman":"Upper Roman (I, II, III, IV, V, etc.)","validateStartNumber":"List start number must be a whole number."},"magicline":{"title":"Insert paragraph here"},"maximize":{"maximize":"Maximize","minimize":"Minimize"},"newpage":{"toolbar":"Helaian Baru"},"pagebreak":{"alt":"Page Break","toolbar":"Insert Page Break for Printing"},"pastetext":{"button":"Tampal sebagai text biasa","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","title":"Tampal sebagai text biasa"},"pastefromword":{"confirmCleanup":"The text you want to paste seems to be copied from Word. Do you want to clean it before pasting?","error":"It was not possible to clean up the pasted data due to an internal error","title":"Tampal dari Word","toolbar":"Tampal dari Word"},"preview":{"preview":"Prebiu"},"print":{"toolbar":"Cetak"},"removeformat":{"toolbar":"Buang Format"},"save":{"toolbar":"Simpan"},"selectall":{"toolbar":"Pilih Semua"},"showblocks":{"toolbar":"Show Blocks"},"sourcearea":{"toolbar":"Sumber"},"specialchar":{"options":"Special Character Options","title":"Sila pilih huruf istimewa","toolbar":"Masukkan Huruf Istimewa"},"scayt":{"btn_about":"About SCAYT","btn_dictionaries":"Dictionaries","btn_disable":"Disable SCAYT","btn_enable":"Enable SCAYT","btn_langs":"Languages","btn_options":"Options","text_title":"Spell Check As You Type"},"stylescombo":{"label":"Stail","panelTitle":"Formatting Styles","panelTitle1":"Block Styles","panelTitle2":"Inline Styles","panelTitle3":"Object Styles"},"table":{"border":"Saiz Border","caption":"Keterangan","cell":{"menu":"Cell","insertBefore":"Insert Cell Before","insertAfter":"Insert Cell After","deleteCell":"Buangkan Sel-sel","merge":"Cantumkan Sel-sel","mergeRight":"Merge Right","mergeDown":"Merge Down","splitHorizontal":"Split Cell Horizontally","splitVertical":"Split 
Cell Vertically","title":"Cell Properties","cellType":"Cell Type","rowSpan":"Rows Span","colSpan":"Columns Span","wordWrap":"Word Wrap","hAlign":"Horizontal Alignment","vAlign":"Vertical Alignment","alignBaseline":"Baseline","bgColor":"Background Color","borderColor":"Border Color","data":"Data","header":"Header","yes":"Yes","no":"No","invalidWidth":"Cell width must be a number.","invalidHeight":"Cell height must be a number.","invalidRowSpan":"Rows span must be a whole number.","invalidColSpan":"Columns span must be a whole number.","chooseColor":"Choose"},"cellPad":"Tambahan Ruang Sel","cellSpace":"Ruangan Antara Sel","column":{"menu":"Column","insertBefore":"Insert Column Before","insertAfter":"Insert Column After","deleteColumn":"Buangkan Lajur"},"columns":"Jaluran","deleteTable":"Delete Table","headers":"Headers","headersBoth":"Both","headersColumn":"First column","headersNone":"None","headersRow":"First Row","heightUnit":"height unit","invalidBorder":"Border size must be a number.","invalidCellPadding":"Cell padding must be a positive number.","invalidCellSpacing":"Cell spacing must be a positive number.","invalidCols":"Number of columns must be a number greater than 0.","invalidHeight":"Table height must be a number.","invalidRows":"Number of rows must be a number greater than 0.","invalidWidth":"Table width must be a number.","menu":"Ciri-ciri Jadual","row":{"menu":"Row","insertBefore":"Insert Row Before","insertAfter":"Insert Row After","deleteRow":"Buangkan Baris"},"rows":"Barisan","summary":"Summary","title":"Ciri-ciri Jadual","toolbar":"Jadual","widthPc":"peratus","widthPx":"piksel-piksel","widthUnit":"width unit"},"undo":{"redo":"Ulangkan","undo":"Batalkan"},"widget":{"move":"Click and drag to move","label":"%1 widget"},"uploadwidget":{"abort":"Upload aborted by the user.","doneOne":"File successfully uploaded.","doneMany":"Successfully uploaded %1 files.","uploadOne":"Uploading file ({percentage}%)...","uploadMany":"Uploading files, {current} of {max} 
done ({percentage}%)..."},"wsc":{"btnIgnore":"Biar","btnIgnoreAll":"Biarkan semua","btnReplace":"Ganti","btnReplaceAll":"Gantikan Semua","btnUndo":"Batalkan","changeTo":"Tukarkan kepada","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"Pemeriksa ejaan tidak dipasang. Adakah anda mahu muat turun sekarang?","manyChanges":"Pemeriksaan ejaan siap: %1 perkataan diubah","noChanges":"Pemeriksaan ejaan siap: Tiada perkataan diubah","noMispell":"Pemeriksaan ejaan siap: Tiada salah ejaan","noSuggestions":"- Tiada cadangan -","notAvailable":"Sorry, but service is unavailable now.","notInDic":"Tidak terdapat didalam kamus","oneChange":"Pemeriksaan ejaan siap: Satu perkataan telah diubah","progress":"Pemeriksaan ejaan sedang diproses...","title":"Spell Checker","toolbar":"Semak Ejaan"}};
PypiClean
/K_AIKO-0.5.2-py3-none-any.whl/kaiko/tui/inputs.py
import functools import contextlib import re import queue import threading from typing import Optional, List, Tuple, Dict, Callable from pathlib import Path import dataclasses from ..utils import datanodes as dn from ..utils import commands as cmd from ..utils import config as cfg from ..utils import markups as mu from ..devices import engines from . import sheditors from .textboxes import Caret, TextBox, TextBoxWidgetSettings # hint class Hint: pass @dataclasses.dataclass(frozen=True) class DescHint(Hint): message: str @dataclasses.dataclass(frozen=True) class InfoHint(Hint): message: str @dataclasses.dataclass class HintState: hint: Hint index: Optional[int] tokens: Optional[List[str]] class HintManager: def __init__(self, editor, preview_handler): self.editor = editor self.preview_handler = preview_handler self.popup_queue = queue.Queue() self.hint_state = None def get_hint(self): return None if self.hint_state is None else self.hint_state.hint def get_hint_location(self): return None if self.hint_state is None else self.hint_state.index def add_popup(self, hint): self.popup_queue.put(hint) def popup_hint(self): hint = self.hint_state.hint if not hint.message: return False self.add_popup(hint) return True def set_hint(self, hint, index=None): if isinstance(hint, DescHint): msg_tokens = ( [token.string for token in self.editor.tokens[:index]] if index is not None else None ) elif isinstance(hint, InfoHint): msg_tokens = ( [token.string for token in self.editor.tokens[: index + 1]] if index is not None else None ) else: assert False self.hint_state = HintState(hint=hint, index=index, tokens=msg_tokens) self.update_preview() return True def cancel_hint(self): if self.hint_state is None: return False self.hint_state = None self.update_preview() return True def update_hint(self): if self.hint_state is None: return False if self.hint_state.tokens is None: return self.cancel_hint() if self.hint_state.index is not None and self.hint_state.index >= len( 
self.editor.tokens ): return self.cancel_hint() if len(self.hint_state.tokens) > len(self.editor.tokens): return self.cancel_hint() for token_string, token in zip(self.hint_state.tokens, self.editor.tokens): if token_string != token.string: return self.cancel_hint() if ( isinstance(self.hint_state.hint, DescHint) and self.editor.tokens[len(self.hint_state.tokens) - 1].type is not None ): return self.cancel_hint() return False def update_preview(self): if self.hint_state is None: self.preview_handler(None) elif not isinstance(self.hint_state.hint, InfoHint): self.preview_handler(None) elif self.hint_state.tokens is None: self.preview_handler(None) elif len(self.hint_state.tokens) != 2: self.preview_handler(None) elif self.hint_state.tokens[0] != "play": self.preview_handler(None) else: self.preview_handler(self.hint_state.tokens[1]) def ask_for_hint(self, index, type="all"): if index not in range(len(self.editor.tokens)): return False target_type = self.editor.tokens[index].type if target_type is None: if type not in ("all", "desc"): self.cancel_hint() return False msg = self.editor.desc(index) if msg is None: self.cancel_hint() return False hint = DescHint(msg) self.set_hint(hint, index) return True else: if type not in ("all", "info"): self.cancel_hint() return False msg = self.editor.info(index + 1) if msg is None: self.cancel_hint() return False hint = InfoHint(msg) self.set_hint(hint, index) return True # autocomplete @dataclasses.dataclass class TabState: suggestions: List[str] sugg_index: int token_index: int original_token: List[str] original_pos: int selection: slice class AutocompleteManager: def __init__(self, editor): self.editor = editor self.tab_state = None def get_suggestions_list(self): if self.tab_state is None: return None else: return self.tab_state.suggestions def get_suggestions_index(self): if self.tab_state is None: return None else: return self.tab_state.sugg_index def is_in_cycle(self): return self.tab_state is not None def 
prepare_tab_state(self, action=+1): # find the token to autocomplete index, token = self.editor.find_token_before(self.editor.pos) if token is None: token_index = 0 target = "" selection = slice(self.editor.pos, self.editor.pos) elif token.mask.stop < self.editor.pos: token_index = index + 1 target = "" selection = slice(self.editor.pos, self.editor.pos) else: token_index = index target = token.string selection = token.mask # generate suggestions suggestions = [ sheditors.quoting(sugg) for sugg in self.editor.suggest(token_index, target) ] sugg_index = len(suggestions) if action == -1 else -1 # tab state original_pos = self.editor.pos original_token = self.editor.buffer[selection] return TabState( suggestions=suggestions, sugg_index=sugg_index, token_index=token_index, original_token=original_token, original_pos=original_pos, selection=selection, ) def autocomplete(self, action=+1): if self.tab_state is None: tab_state = self.prepare_tab_state(action) if len(tab_state.suggestions) == 0: # no suggestion return None if len(tab_state.suggestions) == 1: # one suggestion -> complete directly self.editor.replace(tab_state.selection, tab_state.suggestions[0]) return tab_state.token_index self.tab_state = tab_state if action == +1: self.tab_state.sugg_index += 1 elif action == -1: self.tab_state.sugg_index -= 1 else: raise ValueError(f"invalid action: {action}") if self.tab_state.sugg_index not in range(len(self.tab_state.suggestions)): self.cancel_autocomplete() return None # fill in selected token self.tab_state.selection = self.editor.replace( self.tab_state.selection, self.tab_state.suggestions[self.tab_state.sugg_index], ) return self.tab_state.token_index def cancel_autocomplete(self): if self.tab_state is None: return self.editor.replace(self.tab_state.selection, self.tab_state.original_token) self.editor.move_to(self.tab_state.original_pos) self.tab_state = None def finish_autocomplete(self): if self.tab_state is None: return None index = self.tab_state.token_index 
self.tab_state = None return index # input class Result: pass @dataclasses.dataclass(frozen=True) class EmptyResult(Result): pass @dataclasses.dataclass(frozen=True) class ErrorResult(Result): command_str: str index: Optional[int] error: Exception @dataclasses.dataclass(frozen=True) class CompleteResult(Result): command_group: str command_str: str command: Callable class ShellSyntaxError(Exception): pass class HistoryManager: r""" Fields ------ history_path : Path latest_command : tuple of str and str, optional lastest group and command. buffers : list of list of str The buffers of editor. buffer_index : int The index of current buffer. """ TRIM_LEN = 10 PATTERN = re.compile(r"\[(\w+)\] (.+)") def __init__(self, history_path, latest_command=None): self.history_path = history_path self.latest_command = latest_command self.buffers = [[]] self.buffer_index = -1 @property def buffer(self): return self.buffers[self.buffer_index] def prev(self): if self.buffer_index == -len(self.buffers): return False self.buffer_index -= 1 return True def next(self): if self.buffer_index == -1: return False self.buffer_index += 1 return True def write_history(self, command_group, command): self.history_path.touch() command = command.strip() if ( command and command_group and (command_group, command) != self.latest_command ): open(self.history_path, "a").write(f"\n[{command_group}] {command}") self.latest_command = (command_group, command) def read_history(self, command_groups, read_size): buffers = [] self.history_path.touch() self.latest_command = None for command in open(self.history_path): command = command.strip() match = self.PATTERN.fullmatch(command) if match: self.latest_command = (match.group(1), match.group(2)) if match.group(1) in command_groups and ( not buffers or buffers[-1] != match.group(2) ): buffers.append(match.group(2)) if len(buffers) - read_size > self.TRIM_LEN: del buffers[: self.TRIM_LEN] self.buffers = [list(command) for command in buffers[-read_size:]] 
self.buffers.append([]) self.buffer_index = -1 class InputSettings(cfg.Configurable): r""" Fields ------ preview_song : bool Whether to preview the song when selected. history_size : int The maximum history size. """ preview_song: bool = True history_size: int = 500 @cfg.subconfig class control(cfg.Configurable): r""" Fields ------ confirm_key : str The key for confirming input. help_key : str The key for help. autocomplete_keys : tuple of str and str and str The keys for finding the next, previous and canceling suggestions. keymap : dict from str to str The keymap of input. The key of dict is the keystroke, and the value of dict is the action to activate. The format of action is just like a normal python code: `input.insert_typeahead() or input.move_right()`. The syntax is:: <function> ::= "input." /(?!_)\w+/ "()" <operator> ::= " | " | " & " | " and " | " or " <action> ::= (<function> <operator>)* <function> """ confirm_key: str = "Enter" help_key: str = "Alt_Enter" autocomplete_keys: Tuple[str, str, str] = ("Tab", "Shift_Tab", "Esc") keymap: Dict[str, str] = { "Backspace": "input.backspace()", "Delete": "input.delete()", "Left": "input.move_left()", "Right": "input.insert_typeahead() or input.move_right()", "Up": "input.prev()", "Down": "input.next()", "Home": "input.move_to_start()", "End": "input.move_to_end()", "Ctrl_Left": "input.move_to_word_start()", "Ctrl_Right": "input.move_to_word_end()", "Ctrl_Backspace": "input.delete_to_word_start()", "Ctrl_Delete": "input.delete_to_word_end()", "Alt_Left": "input.move_to_token_start()", "Alt_Right": "input.move_to_token_end()", "Alt_Backspace": "input.delete_backward_token()", "Alt_Delete": "input.delete_forward_token()", "Esc": "input.cancel_typeahead() | input.cancel_hint()", "'\\x04'": "input.delete() or input.exit_if_empty()", } @cfg.subconfig class hint(cfg.Configurable): r""" Fields ------ typeahead : str The markup template for the type-ahead. highlight : str The markup template for the highlighted token. 
desc_message : str The markup template for the desc message. info_message : str The markup template for the info message. message_max_lines : int The maximum number of lines of the message. message_overflow_ellipsis : str Texts to display when overflowing. suggestions_lines : int The maximum number of lines of the suggestions. suggestion_items : tuple of str and str The markup templates for the unselected/selected suggestion. suggestion_overflow_ellipses : tuple of str and str Texts to display when overflowing top/bottom. """ typeahead: str = "[weight=dim][slot/][/]" highlight: str = "[underline][slot/][/]" desc_message: str = "[weight=dim][slot/][/]" info_message: str = f"{'─'*80}\n[slot/]\n{'─'*80}" message_max_lines: int = 16 message_overflow_ellipsis: str = "[weight=dim]…[/]" suggestions_lines: int = 8 suggestion_items: Tuple[str, str] = ("• [slot/]", "• [invert][slot/][/]") suggestion_overflow_ellipses: Tuple[str, str] = ( "[weight=dim]ⵗ [slot/][/]", "[weight=dim]ⵗ [slot/][/]", ) @cfg.subconfig class textbox(cfg.Configurable, TextBoxWidgetSettings): __doc__ = TextBoxWidgetSettings.__doc__ def __init__(self): pass class ContextDispatcher: def __init__(self): self.lock = threading.RLock() self.isin = False self.before_callbacks = [] self.after_callbacks = [] self.onerror_callbacks = [] def before(self, callback): with self.lock: self.before_callbacks.append(callback) def after(self, callback): with self.lock: self.after_callbacks.append(callback) def onerror(self, callback): with self.lock: self.onerror_callbacks.append(callback) @contextlib.contextmanager def on(self): with self.lock: isin = self.isin if isin: for callback in self.before_callbacks: callback() self.isin = False try: yield except: self.isin = isin if isin: for callback in self.onerror_callbacks: callback() raise finally: self.isin = isin if isin: for callback in self.after_callbacks: callback() def onstate(*states): def onstate_dec(func): @functools.wraps(func) def onstate_func(self, *args, 
**kwargs): if self.state not in states: return False return func(self, *args, **kwargs) return onstate_func return onstate_dec def locked(func): @functools.wraps(func) def locked_func(self, *args, **kwargs): with self.edit_ctxt.on(): return func(self, *args, **kwargs) return locked_func class Input: r"""Input editor. Attributes ---------- settings : InputSettings The input settings. history : HistoryManager The input history manager. editor : sheditors.Editor The editor of command. typeahead : str The type ahead of input. hint_manager : HintManager autocomplete_manager : AutocompleteManager result : Result or None The result of input. state : str The input state. buffer_modified_counter : int The event counter for modifying buffer. key_pressed_counter : int The event counter for key pressing. """ def __init__( self, preview_handler, history_path, settings, ): r"""Constructor. Parameters ---------- preview_handler : function history_path : Path The path of command history. settings : InputSettings The settings of input. 
""" self.settings = settings self.history = HistoryManager(history_path) self.editor = sheditors.Editor(None, self.history.buffer) self.typeahead = "" self.hint_manager = HintManager( self.editor, lambda song: preview_handler(song) if self.settings.preview_song else None, ) self.autocomplete_manager = AutocompleteManager(self.editor) self.state = "FIN" self.result = None self.edit_ctxt = ContextDispatcher() self.key_pressed_counter = 0 self.buffer_modified_counter = 0 def _set_settings(self, settings): self.settings = settings def _register(self, fin_event, provider): rich = provider.get(mu.RichParser) renderer = provider.get(engines.Renderer) controller = provider.get(engines.Controller) stroke = InputStroke(self, self.settings.control) stroke.register(controller) state = InputView(self) text_renderer = TextRenderer(rich, self.settings.hint) msg_renderer = MsgRenderer(rich, self.settings.hint) renderer.add_drawer(state.load(fin_event), zindex=()) renderer.add_drawer(msg_renderer.render_msg(state), zindex=(1,)) textbox = TextBox( text_renderer.render_text(state), self.settings.textbox, ).load(provider) return textbox def _record_command(self): command = "".join(self.editor.buffer).strip() self.history.write_history(self.editor.group, command) @locked @onstate("EDIT") def _finish_session(self, res): r"""Finish this session of input. Parameters ---------- res : Result The result. """ self.result = res self.state = "FIN" @locked @onstate("FIN") def _new_session(self, command_parser, clear=True): r"""Start a new session of input. Parameters ---------- command_parser : cmd.CommandParser clear : bool, optional """ self.editor.update_parser(command_parser) if clear: groups = self.editor.get_all_groups() history_size = self.settings.history_size self.history.read_history(groups, history_size) self.editor.init(self.history.buffer) self.update_buffer(clear=True) self.start() @locked @onstate("FIN") def start(self): """Start a session of input. 
Returns ------- succ : bool """ self.result = None self.state = "EDIT" return True @locked @onstate("EDIT") def prev(self): """Previous buffer. Returns ------- succ : bool """ succ = self.history.prev() if not succ: return False self.editor.init(self.history.buffer) self.update_buffer(clear=True) return True @locked @onstate("EDIT") def next(self): """Next buffer. Returns ------- succ : bool """ succ = self.history.next() if not succ: return False self.editor.init(self.history.buffer) self.update_buffer(clear=True) return True @locked def show_typeahead(self): """Make typeahead. Show the possible command you want to type. Only work if the caret is at the end of buffer. Returns ------- succ : bool `False` if unable to complete or the caret is not at the end of buffer. """ if self.editor.pos != len(self.editor.buffer): self.typeahead = "" return False # search history pos = self.editor.pos for buffer in reversed(self.history.buffers): if len(buffer) > pos and buffer[:pos] == self.editor.buffer: self.typeahead = "".join(buffer[pos:]) return True self.typeahead = "" return False @locked def cancel_typeahead(self): """Cancel typeahead. Returns ------- succ : bool """ self.typeahead = "" return True @locked @onstate("EDIT") def insert_typeahead(self): """Insert typeahead. Insert the typeahead if the caret is at the end of buffer. Returns ------- succ : bool `False` if there is no typeahead or the caret is not at the end of buffer. """ if self.typeahead == "" or self.editor.pos != len(self.editor.buffer): return False self.editor.insert(self.typeahead) self.update_buffer() self.ask_for_hint() return True @locked def add_popup(self, msg): """Add popup. Show hint above the prompt. Parameters ---------- msg : str The message of hint. Returns ------- succ : bool """ self.hint_manager.add_popup(DescHint(msg)) return True @locked def set_hint(self, msg, index=None): """Set hint. Show hint below the prompt. Parameters ---------- msg : str The message of hint. 
index : int or None Index of the token to which the hint is directed, or `None` for nothing. Returns ------- succ : bool """ return self.hint_manager.set_hint(DescHint(msg), index=index) @locked def cancel_hint(self): """Cancel hint. Remove the hint below the prompt. Returns ------- succ : bool """ return self.hint_manager.cancel_hint() @locked def update_hint(self): """Update hint. Remove hint if the target is updated. Returns ------- succ : bool `False` if there is no hint or the hint isn't removed. """ return self.hint_manager.update_hint() @locked @onstate("EDIT") def ask_for_hint(self, index=None, type="all"): """Ask some hint for command. Provide some hint for the command on the caret. Parameters ---------- index : int, optional type : one of "info", "desc", "all", optional The type of hint to ask. Returns ------- succ : bool """ if index is None: index, token = self.editor.find_token_before(self.editor.pos) return self.hint_manager.ask_for_hint(index, type=type) @locked def update_buffer(self, clear=False): """Update buffer. Parameters ---------- clear : bool, optional Returns ------- succ : bool """ self.editor.parse() self.buffer_modified_counter += 1 self.cancel_typeahead() if clear: self.cancel_hint() else: self.update_hint() return True @locked @onstate("EDIT") def insert(self, text): """Input. Insert some text into the buffer. Parameters ---------- text : str The text to insert. It shouldn't contain any nongraphic character, except for prefix `\\b` which indicate deleting. Returns ------- succ : bool `False` if buffer isn't changed. """ succ = self.editor.insert(text) if not succ: return False self.update_buffer() self.show_typeahead() self.ask_for_hint() return True @locked @onstate("EDIT") def backspace(self): """Backspace. Delete one character before the caret if exists. 
Returns ------- succ : bool """ succ = self.editor.backspace() if not succ: return False self.update_buffer() self.ask_for_hint() return True @locked @onstate("EDIT") def delete(self): """Delete. Delete one character after the caret if exists. Returns ------- succ : bool """ succ = self.editor.delete() if not succ: return False self.update_buffer() self.ask_for_hint() return True @locked @onstate("EDIT") def delete_all(self): """Delete All. Returns ------- succ : bool """ succ = self.editor.delete_all() if not succ: return False self.update_buffer() self.ask_for_hint() return True @locked @onstate("EDIT") def delete_range(self, start, end): """Delete range. Parameters ---------- start : int or None end : int or None Returns ------- succ : bool """ self.editor.replace(slice(start, end), "") self.update_buffer() self.ask_for_hint() return True @locked @onstate("EDIT") def delete_to_word_start(self): """Delete to the word start. The word is defined as `\\w+|\\W+`. Returns ------- succ : bool """ mask = self.editor.to_word_start() return self.delete_range(mask.start, mask.stop) @locked @onstate("EDIT") def delete_to_word_end(self): """Delete to the word end. The word is defined as `\\w+|\\W+`. Returns ------- succ : bool """ mask = self.editor.to_word_end() return self.delete_range(mask.start, mask.stop) @locked @onstate("EDIT") def delete_token(self, index): """Delete current token. Parameters ---------- index : int Returns ------- succ : bool """ token = self.editor.tokens[index] return self.delete_range(token.mask.start, token.mask.stop) @locked @onstate("EDIT") def delete_backward_token(self): """Delete backward token. Returns ------- succ : bool """ _, token = self.editor.find_token_before(self.editor.pos) if token is None: return self.delete_range(0, self.editor.pos) else: return self.delete_range( token.mask.start, max(self.editor.pos, token.mask.stop) ) @locked @onstate("EDIT") def delete_forward_token(self): """Delete forward token. 
Returns ------- succ : bool """ _, token = self.editor.find_token_after(self.editor.pos) if token is None: return self.delete_range(self.editor.pos, None) else: return self.delete_range( min(self.editor.pos, token.mask.start), token.mask.stop ) @locked @onstate("EDIT") def move_to(self, pos): """Move caret to the specific position. Regardless of success or failure, typeahead will be cancelled. Parameters ---------- pos : int or None Index of buffer, which will be clamped to 0 and length of buffer, or `None` for the end of buffer. Returns ------- succ : bool """ succ = self.editor.move_to(pos) self.cancel_typeahead() return succ @locked @onstate("EDIT") def move(self, offset): """Move caret. Parameters ---------- offset : int Returns ------- succ : bool """ return self.move_to(self.editor.pos + offset) @locked @onstate("EDIT") def move_left(self): """Move caret one character to the left. Returns ------- succ : bool """ return self.move(-1) @locked @onstate("EDIT") def move_right(self): """Move caret one character to the right. Returns ------- succ : bool """ return self.move(+1) @locked @onstate("EDIT") def move_to_start(self): """Move caret to the start of buffer. Returns ------- succ : bool """ return self.move_to(0) @locked @onstate("EDIT") def move_to_end(self): """Move caret to the end of buffer. Returns ------- succ : bool """ return self.move_to(None) @locked @onstate("EDIT") def move_to_word_start(self): """Move caret to the start of the word. Returns ------- succ : bool """ mask = self.editor.to_word_start() return self.move_to(mask.start) @locked @onstate("EDIT") def move_to_word_end(self): """Move caret to the end of the word. Returns ------- succ : bool """ mask = self.editor.to_word_end() return self.move_to(mask.stop) @locked @onstate("EDIT") def move_to_token_start(self): """Move caret to the start of the token. 
Returns ------- succ : bool """ _, token = self.editor.find_token_before(self.editor.pos) if token is None: return self.move_to(0) else: return self.move_to(token.mask.start) @locked @onstate("EDIT") def move_to_token_end(self): """Move caret to the end of the word. Returns ------- succ : bool """ _, token = self.editor.find_token_after(self.editor.pos) if token is None: return self.move_to(None) else: return self.move_to(token.mask.stop) @locked @onstate("EDIT") def help(self): """Help for command. Print some hint for the command before the caret. Returns ------- succ : bool """ # find the token before the caret index, token = self.editor.find_token_before(self.editor.pos) if token is None: return False if self.hint_manager.get_hint_location() != index: self.ask_for_hint(index) return False return self.hint_manager.popup_hint() @locked @onstate("EDIT") def confirm(self): """Finish the command. Returns ------- succ : bool `False` if the command is wrong. """ self.cancel_hint() if not self.editor.tokens: self._finish_session(EmptyResult()) return True command_str = "".join(self.editor.buffer).strip() if self.editor.lex_state == sheditors.SHLEXER_STATE.BACKSLASHED: res, index = ( ShellSyntaxError("No escaped character"), len(self.editor.tokens) - 1, ) elif self.editor.lex_state == sheditors.SHLEXER_STATE.QUOTED: res, index = ( ShellSyntaxError("No closing quotation"), len(self.editor.tokens) - 1, ) else: res, index = self.editor.result, self.editor.length if isinstance(res, cmd.CommandUnfinishError): self._finish_session(ErrorResult(command_str, None, res)) return False elif isinstance(res, (cmd.CommandParseError, ShellSyntaxError)): self._finish_session(ErrorResult(command_str, index, res)) return False else: self._finish_session( CompleteResult(str(self.editor.group), command_str, res) ) return True @locked @onstate("EDIT") def exit_if_empty(self): """Finish the command. Returns ------- succ : bool `False` if unfinished or the command is wrong. 
""" if self.editor.buffer: return False self.insert("bye") return self.confirm() @locked @onstate("EDIT") def forward_autocomplete(self): """Autocomplete forwardly. Complete the token on the caret, or fill in suggestions if caret is located in between. Returns ------- succ : bool `True` if is in autocompletion cycle. Note that it will be `False` for no suggestion or one suggestion case. """ index = self.autocomplete_manager.autocomplete(action=+1) is_in_cycle = self.autocomplete_manager.is_in_cycle() self.update_buffer(clear=True) if index is not None: self.ask_for_hint(index, type="info") return is_in_cycle @locked @onstate("EDIT") def backward_autocomplete(self): """Autocomplete backwardly. Complete the token on the caret backwardly, or fill in suggestions if caret is located in between. Returns ------- succ : bool `True` if is in autocompletion cycle. Note that it will be `False` for no suggestion or one suggestion case. """ index = self.autocomplete_manager.autocomplete(action=-1) is_in_cycle = self.autocomplete_manager.is_in_cycle() self.update_buffer(clear=True) if index is not None: self.ask_for_hint(index, type="info") return is_in_cycle @locked @onstate("EDIT") def finish_autocomplete(self): r"""Finish autocompletion. Returns ------- succ : bool """ index = self.autocomplete_manager.finish_autocomplete() if index is not None: self.ask_for_hint(index, type="info") return True @locked @onstate("EDIT") def cancel_autocomplete(self): r"""Cancel autocompletion. 
Returns ------- succ : bool """ self.autocomplete_manager.cancel_autocomplete() self.update_buffer(clear=True) return True @locked def unknown_key(self, key): self.cancel_hint() command_str = "".join(self.editor.buffer).strip() self._finish_session( ErrorResult(command_str, None, ValueError(f"Unknown key: " + key)) ) class InputStroke: r"""Keyboard controller.""" def __init__(self, input, settings): self.input = input self.settings = settings @staticmethod def _parse_action(func): ACTION_REGEX = "({fn}{op})*{fn}".format( fn=r"input\.(?!_)\w+\(\)", op=r"( \| | \& | and | or )", ) if not re.match(ACTION_REGEX, func): raise ValueError(f"invalid action: {repr(func)}") def action(input): with input.edit_ctxt.on(): eval(func, {}, {"input": input}) return action def register(self, controller): r"""Register handler to the given controller. Parameters ---------- controller : engines.Controller """ controller.add_handler(self.keypress_handler()) controller.add_handler( self.autocomplete_handler( self.settings.autocomplete_keys, self.settings.help_key ) ) controller.add_handler(self.printable_handler()) for key, func in self.settings.keymap.items(): action = self._parse_action(func) action_handler = lambda _, action=action: action(self.input) controller.add_handler(action_handler, key) controller.add_handler(self.help_handler(), self.settings.help_key) controller.add_handler(self.confirm_handler(), self.settings.confirm_key) controller.add_handler(self.unknown_handler(self.settings)) def keypress_handler(self): def keypress(_): self.input.key_pressed_counter += 1 return keypress def confirm_handler(self): return lambda _: self.input.confirm() def help_handler(self): return lambda _: self.input.help() def autocomplete_handler(self, keys, help_key): next_key, prev_key, cancel_key = keys def handler(args): _, time, keyname, keycode = args if keyname == next_key: self.input.forward_autocomplete() elif keyname == prev_key: self.input.backward_autocomplete() elif keyname == 
cancel_key: self.input.cancel_autocomplete() elif keyname != help_key: self.input.finish_autocomplete() return handler def printable_handler(self): def handler(args): _, time, keyname, keycode = args if keycode.isprintable(): self.input.insert(keycode) return handler def unknown_handler(self, settings): keys = list(settings.keymap.keys()) keys.append(settings.confirm_key) keys.append(settings.help_key) keys.extend(settings.autocomplete_keys) def handler(args): _, _, key, code = args if key not in keys and not code.isprintable(): self.input.unknown_key(key) return handler class InputView: def __init__(self, input): self.input = input self.key_pressed = False self.buffer = [] self.tokens = [] self.pos = 0 self.highlighted = None self.typeahead = "" self.clean = False self.hint = None self.popup = [] self.suggestions = None self.state = "EDIT" @dn.datanode def load(self, fin_event): buffer_modified_counter = None key_pressed_counter = None res, time, width = yield while True: with self.input.edit_ctxt.lock: if self.input.buffer_modified_counter != buffer_modified_counter: buffer_modified_counter = self.input.buffer_modified_counter self.buffer = list(self.input.editor.buffer) self.tokens = list(self.input.editor.tokens) self.pos = self.input.editor.pos self.typeahead = self.input.typeahead self.clean = self.input.result is not None self.hint = self.input.hint_manager.get_hint() self.suggestions = ( self.input.autocomplete_manager.get_suggestions_list(), self.input.autocomplete_manager.get_suggestions_index(), ) self.popup = [] while True: try: hint = self.input.hint_manager.popup_queue.get(False) except queue.Empty: break self.popup.append(hint) if isinstance(self.input.result, ErrorResult): self.highlighted = self.input.result.index else: self.highlighted = self.input.hint_manager.get_hint_location() self.state = self.input.state self.key_pressed = self.input.key_pressed_counter != key_pressed_counter key_pressed_counter = self.input.key_pressed_counter res, time, 
width = yield res # fin if self.state == "FIN" and not fin_event.is_set(): fin_event.set() @dataclasses.dataclass(frozen=True) class ByAddress: value: object def __eq__(self, other): if not isinstance(other, ByAddress): return False return self.value is other.value class TextRenderer: def __init__(self, rich, settings): self.rich = rich self.settings = settings @staticmethod def _render_grammar_key(buffer, tokens, typeahead, pos, highlighted, clean): return ( ByAddress(buffer), typeahead, pos, highlighted, clean, ) def render_grammar( self, buffer, tokens, typeahead, pos, highlighted, clean, caret_markup, typeahead_template, highlight_template, ): length = len(buffer) buffer = list(buffer) for token in tokens: # markup whitespace for index in range(token.mask.start, token.mask.stop): if buffer[index] == " ": buffer[index] = self.rich.tags["ws"]() # markup escape for index in token.quotes: if buffer[index] == "'": buffer[index] = self.rich.tags["qt"]() elif buffer[index] == "\\": buffer[index] = self.rich.tags["bs"]() else: assert False # markup caret, typeahead if clean: typeahead = "" if pos == length and not typeahead: buffer.append(" ") if not clean: if pos < len(buffer): buffer[pos] = caret_markup(mu.join([buffer[pos]]).children) else: typeahead = caret_markup(mu.join(typeahead[:1]).children), typeahead[1:] typeahead_markup = typeahead_template(mu.join(typeahead)) res = [] prev_index = 0 for n, token in enumerate(tokens): # markup delimiter delimiter_markup = mu.join(buffer[prev_index : token.mask.start]) res.append(delimiter_markup) prev_index = token.mask.stop # markup token token_markup = mu.join(buffer[token.mask]) if token.type is None: if clean or token.mask.stop != length: token_markup = self.rich.tags["unk"](token_markup.children) elif token.type is cmd.TOKEN_TYPE.COMMAND: token_markup = self.rich.tags["cmd"](token_markup.children) elif token.type is cmd.TOKEN_TYPE.KEYWORD: token_markup = self.rich.tags["kw"](token_markup.children) elif token.type is 
cmd.TOKEN_TYPE.ARGUMENT: token_markup = self.rich.tags["arg"](token_markup.children) else: assert False # markup highlight if n == highlighted: token_markup = highlight_template(token_markup) res.append(token_markup) else: delimiter_markup = mu.join(buffer[prev_index:]) res.append(delimiter_markup) markup = mu.Group((*res, typeahead_markup)) markup = markup.expand() return markup @dn.datanode def render_text(self, state): typeahead_template = self.rich.parse(self.settings.typeahead, slotted=True) highlight_template = self.rich.parse(self.settings.highlight, slotted=True) render_grammar = dn.starcachemap( self.render_grammar, key=self._render_grammar_key, caret_markup=Caret, typeahead_template=typeahead_template, highlight_template=highlight_template, ) with render_grammar: yield while True: markup = render_grammar.send( ( state.buffer, state.tokens, state.typeahead, state.pos, state.highlighted, state.clean, ) ) yield markup, state.key_pressed class MsgRenderer: def __init__(self, rich, settings): self.rich = rich self.settings = settings @dn.datanode def render_msg(self, state): message_max_lines = self.settings.message_max_lines sugg_lines = self.settings.suggestions_lines sugg_items = self.settings.suggestion_items message_overflow_ellipsis = self.settings.message_overflow_ellipsis suggestion_overflow_ellipses = self.settings.suggestion_overflow_ellipses msg_ellipsis = self.rich.parse(message_overflow_ellipsis) msg_ellipsis_width = self.rich.widthof(msg_ellipsis) if msg_ellipsis_width == -1: raise ValueError(f"invalid ellipsis: {message_overflow_ellipsis!r}") sugg_top_ellipsis = self.rich.parse( suggestion_overflow_ellipses[0], slotted=True ) sugg_bottom_ellipsis = self.rich.parse( suggestion_overflow_ellipses[1], slotted=True ) sugg_items_templates = ( self.rich.parse(sugg_items[0], slotted=True), self.rich.parse(sugg_items[1], slotted=True), ) desc_template = self.rich.parse(self.settings.desc_message, slotted=True) info_template = 
self.rich.parse(self.settings.info_message, slotted=True) render_hint = dn.starcachemap( self.render_hint, message_max_lines=message_max_lines, msg_ellipsis=msg_ellipsis, sugg_lines=sugg_lines, sugg_items_templates=sugg_items_templates, sugg_ellipses=(sugg_top_ellipsis, sugg_bottom_ellipsis), desc_template=desc_template, info_template=info_template, ) with render_hint: (view, msgs, logs), time, width = yield while True: msg = render_hint.send((state.hint, state.suggestions)) if msg is None: if len(msgs) != 0: msgs.clear() else: if len(msgs) != 1 or msgs[0] is not msg: msgs.clear() msgs.append(msg) logs.extend( self.render_popup( state.popup, desc_template=desc_template, info_template=info_template, ) ) (view, msgs, logs), time, width = yield (view, msgs, logs) def render_hint( self, hint, suggestions, *, message_max_lines, msg_ellipsis, sugg_lines, sugg_items_templates, sugg_ellipses, desc_template, info_template, ): msgs = [] # draw hint msg = None if hint is not None and hint.message: msg = self.rich.parse(hint.message, root_tag=True) lines = 0 def trim_lines(text): nonlocal lines if lines >= message_max_lines: return mu.Text("") if isinstance(text, mu.Newline): lines += 1 if lines == message_max_lines: return mu.Group((text, msg_ellipsis)) else: for i, ch in enumerate(text.string): if ch == "\n": lines += 1 if lines == message_max_lines: return mu.Group( (mu.Text(text.string[: i + 1]), msg_ellipsis) ) return text msg = msg.traverse((mu.Text, mu.Newline), trim_lines) if isinstance(hint, DescHint): msg = desc_template(msg) elif isinstance(hint, InfoHint): msg = info_template(msg) else: assert False msg = msg.expand() if suggestions[0] is not None: suggs_list, sugg_index = suggestions sugg_start = sugg_index // sugg_lines * sugg_lines sugg_end = sugg_start + sugg_lines suggs = suggs_list[sugg_start:sugg_end] res = [] for i, sugg in enumerate(suggs): sugg = mu.Text(sugg) item_template = ( sugg_items_templates[1] if i == sugg_index - sugg_start else 
sugg_items_templates[0] ) sugg = item_template(sugg) res.append(sugg) if i == sugg_index - sugg_start and msg is not None: res.append(msg) if sugg_start > 0: res.insert(0, sugg_ellipses[0](mu.Text(f"{sugg_start} more"))) if sugg_end < len(suggs_list): res.append( sugg_ellipses[1](mu.Text(f"{len(suggs_list) - sugg_end} more")) ) nl = mu.Text("\n") is_fst = True for block in res: if not is_fst: msgs.append(nl) msgs.append(block) is_fst = False else: if msg is not None: msgs.append(msg) return mu.Group(tuple(msgs)) if msgs else None def render_popup(self, popup, *, desc_template, info_template): logs = [] # draw popup for hint in popup: msg = None if hint.message: msg = self.rich.parse(hint.message, root_tag=True) if isinstance(hint, DescHint): msg = desc_template(msg) elif isinstance(hint, InfoHint): msg = info_template(msg) else: assert False msg = mu.Group((msg, mu.Text("\n"))) msg = msg.expand() if msg is not None: logs.append(msg) return logs
PypiClean
/Draugr-1.0.9.tar.gz/Draugr-1.0.9/draugr/visualisation/matplotlib_utilities/signal_data/3d_spectrum.py
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
           Created on 06-01-2021
           """

from typing import Sequence

import mpl_toolkits.mplot3d.axes3d as p3
import numpy
from matplotlib import animation, cm, pyplot
from mpl_toolkits.mplot3d import axes3d
from scipy.signal import chirp, spectrogram
from warg import next_pow_2

__all__ = ["spectral_plot3d", "spectrum_plot3d"]


# TODO: ANIMATED VARIANT, maybe as a drawer!


def spectral_plot3d(
    time: numpy.ndarray, frequencies: numpy.ndarray, fxt: numpy.ndarray
) -> pyplot.Figure:
    """Return a new figure with a 3d surface plot of a complex spectrogram.

    The surface height is the real part of ``fxt``; the face colour encodes
    the imaginary part, shifted into [0, 1] and mapped through the jet
    colormap.

    :param time: 1d array of segment times, length T.
    :param frequencies: 1d array of frequencies, length F.
    :param fxt: complex array of shape (F, T), e.g. the ``mode="complex"``
        output of :func:`scipy.signal.spectrogram`.
    :return: the created :class:`matplotlib.figure.Figure`.
    """
    assert fxt.shape == (*frequencies.shape, *time.shape)
    # ``numpy.issubdtype`` accepts any complex dtype (complex64/complex128);
    # the original ``numpy.complex`` alias was removed in NumPy 1.24 and only
    # ever matched complex128 anyway.
    assert numpy.issubdtype(fxt.dtype, numpy.complexfloating)

    fig = pyplot.figure()
    # Constructing ``p3.Axes3D(fig)`` directly no longer attaches the axes to
    # the figure (``auto_add_to_figure`` was removed in matplotlib 3.7), which
    # left the figure blank; ``add_subplot`` is the supported way.
    ax = fig.add_subplot(projection="3d")

    # meshgrid(time, frequencies) yields arrays of shape (F, T) == fxt.shape.
    x, y = numpy.meshgrid(time, frequencies)

    # Vectorised replacement of the original per-element double loop:
    # height is the real part, colour channel is the imaginary part shifted
    # by *0.5 + 0.5 (as before) and then normalised by its maximum.
    z = fxt.real.astype(float)
    colors = fxt.imag * 0.5 + 0.5
    colors = colors / colors.max()

    ax.plot_surface(
        x,
        y,
        z,
        facecolors=cm.jet(colors),
        # linewidth=0
        # color='0.75',
        # rstride=1,
        # cstride=1
        # rcount=50
        # #ccount=50
    )
    ax.set_ylabel("Frequency [kHz]")
    ax.set_xlabel("Time [s]")
    ax.set_zlabel("Magnitude")
    return fig


def spectrum_plot3d(
    signal: Sequence, sampling_rate: int, window_length_ms=(20 / 1000)
) -> pyplot.Figure:
    """Return a new figure with a 3d plot of the spectrum of ``signal``.

    :param signal: the time-domain signal.
    :param sampling_rate: sample rate in Hz.
    :param window_length_ms: analysis-window length; NOTE(review): despite the
        name the value is in *seconds* (default 20/1000 == 0.02 s == 20 ms).
    :return: the created :class:`matplotlib.figure.Figure`.
    """
    # FFT segment length: next power of two covering one analysis window.
    n_per_seg = next_pow_2(
        sampling_rate * window_length_ms
    )  # 20 ms, next_pow_2 per seg == n_fft
    f, t, fxt = spectrogram(
        signal,
        fs=sampling_rate,
        window="hann",  # the "hanning" alias was removed from scipy.signal
        nperseg=n_per_seg,
        scaling="spectrum",
        mode="complex",
    )
    return spectral_plot3d(t, f, fxt)


if __name__ == "__main__":

    def asdijaisd() -> None:
        """Demo: spectrum of a sine plus a linear chirp.

        :rtype: None
        """
        sr = 1000
        t = numpy.arange(sr * 4) / sr
        # noise = numpy.random.rand(sr * 2) * 0.001
        w = chirp(t, f0=100, f1=500, t1=4, method="linear")
        signal = numpy.sin(200 * 2 * numpy.pi * t) + w  # + noise
        spectrum_plot3d(signal, sr)
        pyplot.show()

    def aisjd() -> None:
        """Demo: animated 3d parametric helix.

        :rtype: None
        """
        fig = pyplot.figure()
        # Same fix as in spectral_plot3d: attach the 3d axes via add_subplot
        # instead of constructing axes3d.Axes3D(fig) directly.
        ax = fig.add_subplot(projection="3d")

        def gen(n):
            """Yield n points [cos(phi), sin(phi), phi] for phi in [0, 2*pi)."""
            phi = 0
            while phi < 2 * numpy.pi:
                yield numpy.array([numpy.cos(phi), numpy.sin(phi), phi])
                phi += 2 * numpy.pi / n

        def update(num, data, line):
            """Animation callback: extend the line to the first ``num`` points."""
            line.set_data(data[:2, :num])
            line.set_3d_properties(data[2, :num])

        def asudh():
            """Build the line artist, configure axes and run the animation."""
            N = 100
            data = numpy.array(list(gen(N))).T
            (line,) = ax.plot(data[0, 0:1], data[1, 0:1], data[2, 0:1])

            # Setting the axes properties
            ax.set_xlim3d([-1.0, 1.0])
            ax.set_xlabel("X")

            ax.set_ylim3d([-1.0, 1.0])
            ax.set_ylabel("Y")

            ax.set_zlim3d([0.0, 10.0])
            ax.set_zlabel("Z")

            # keep a reference so the animation is not garbage-collected
            ani = animation.FuncAnimation(
                fig, update, N, fargs=(data, line), interval=10000 / N, blit=False
            )

            # ani.save('matplot003.gif', writer='imagemagick')
            pyplot.show()

        asudh()

    # aisjd()
    asdijaisd()
PypiClean
/Audit-Alembic-0.1.0.tar.gz/Audit-Alembic-0.1.0/ci/appveyor-download.py
from __future__ import unicode_literals import argparse import os import zipfile import requests def make_auth_headers(): """Make the authentication headers needed to use the Appveyor API.""" path = os.path.expanduser("~/.appveyor.token") if not os.path.exists(path): raise RuntimeError( "Please create a file named `.appveyor.token` in your home directory. " "You can get the token from https://ci.appveyor.com/api-token" ) with open(path) as f: token = f.read().strip() headers = { 'Authorization': 'Bearer {}'.format(token), } return headers def download_latest_artifacts(account_project, build_id): """Download all the artifacts from the latest build.""" if build_id is None: url = "https://ci.appveyor.com/api/projects/{}".format(account_project) else: url = "https://ci.appveyor.com/api/projects/{}/build/{}".format(account_project, build_id) build = requests.get(url, headers=make_auth_headers()).json() jobs = build['build']['jobs'] print(u"Build {0[build][version]}, {1} jobs: {0[build][message]}".format(build, len(jobs))) for job in jobs: name = job['name'] print(u" {0}: {1[status]}, {1[artifactsCount]} artifacts".format(name, job)) url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts".format(job['jobId']) response = requests.get(url, headers=make_auth_headers()) artifacts = response.json() for artifact in artifacts: is_zip = artifact['type'] == "Zip" filename = artifact['fileName'] print(u" {0}, {1} bytes".format(filename, artifact['size'])) url = "https://ci.appveyor.com/api/buildjobs/{}/artifacts/{}".format(job['jobId'], filename) download_url(url, filename, make_auth_headers()) if is_zip: unpack_zipfile(filename) os.remove(filename) def ensure_dirs(filename): """Make sure the directories exist for `filename`.""" dirname = os.path.dirname(filename) if dirname and not os.path.exists(dirname): os.makedirs(dirname) def download_url(url, filename, headers): """Download a file from `url` to `filename`.""" ensure_dirs(filename) response = requests.get(url, 
headers=headers, stream=True) if response.status_code == 200: with open(filename, 'wb') as f: for chunk in response.iter_content(16 * 1024): f.write(chunk) else: print(u" Error downloading {}: {}".format(url, response)) def unpack_zipfile(filename): """Unpack a zipfile, using the names in the zip.""" with open(filename, 'rb') as fzip: z = zipfile.ZipFile(fzip) for name in z.namelist(): print(u" extracting {}".format(name)) ensure_dirs(name) z.extract(name) parser = argparse.ArgumentParser(description='Download artifacts from AppVeyor.') parser.add_argument('--id', metavar='PROJECT_ID', default='jpassaro/Audit-Alembic', help='Project ID in AppVeyor.') parser.add_argument('build', nargs='?', metavar='BUILD_ID', help='Build ID in AppVeyor. Eg: master-123') if __name__ == "__main__": # import logging # logging.basicConfig(level="DEBUG") args = parser.parse_args() download_latest_artifacts(args.id, args.build)
PypiClean
/ElasticTabstops-1.0.1.tar.gz/ElasticTabstops-1.0.1/elastictabstops/convert.py
# This file tries to follow the Style Guide for Python Code (PEP 8) *EXCEPT*
# * it uses tabs for indenting (like Guido used to recommend)
# * it doesn't follow the Maximum Line Length rule
# use pylint as following: pylint --indent-string='\t' --max-line-length=1000 elastictabstops

from collections import namedtuple
import math
import re

# This code can be used to convert large amounts of text, so performance matters.
# For this reason we use namedtuples and __slots__ to create readable but well-performing data structures.

# A piece of cell text plus the column at which it starts.
PositionedText = namedtuple('PositionedText', ['text', 'position'])


class SizedText(object):
	"""Class used to store text and the width of the cell it's in."""
	__slots__ = ['text', 'size']

	def __init__(self, text, tab_width, multiples_of_tab_width):
		self.text = text
		# size initially stores the minimum width of the cell
		# we add two to provide padding - one is not enough as it could be confused for a non-aligning space
		if multiples_of_tab_width:
			self.size = int((math.ceil((len(self.text) + 2) / float(tab_width)))) * tab_width
		else:
			self.size = max(len(self.text) + 2, tab_width)

	def get_padded_text(self):
		"""Returns self.text plus spaces to match the number of characters in self.size."""
		nof_spaces = self.size - len(self.text)
		return self.text + (' ' * nof_spaces)


def _cell_exists(list_of_lists, line_num, cell_num):
	"""Check that an item exists in a list of lists."""
	return line_num < len(list_of_lists) and cell_num < len(list_of_lists[line_num])


def _sub_tabs(line, tab_width, repl_char):
	"""Return a line of text where tab characters have been substituted with the correct number of replacement characters."""
	str_list = []
	pos = 0
	for char in line:
		if char == '\t':
			# a tab advances to the next multiple of tab_width
			expand = tab_width - (pos % tab_width)
			str_list.append(expand * repl_char)
			pos += expand
		else:
			str_list.append(char)
			pos += 1
	return ''.join(str_list)


def _get_positions_contents(text, tab_width):
	"""Given a piece of text and how long tabs should be, return a list of lists of PositionedText named tuples."""
	repl_char = '\x1a'  # the 'substitute character' in unicode
	text = '\n'.join([_sub_tabs(line, tab_width, repl_char) for line in text.split('\n')])
	# Look for a char that is (not a space or \x1a) followed by any number of chars that are either (not a space or \x1a) or a space followed by (not a space or \x1a)
	# This allows the substrings to have spaces, but only if that space is followed by a non-space char
	compiled = re.compile(r'[^%(repl_char)s\s](?:[^%(repl_char)s\s]|\s(?=[^%(repl_char)s\s]))*' % {'repl_char': repl_char})
	return [[PositionedText(match.group(), match.start()) for match in compiled.finditer(line)] for line in text.split('\n')]


def _from_spaces(text, tab_width):
	"""Convert spaces aligned text to table."""
	if not isinstance(text, str):
		raise TypeError("The first parameter of _from_spaces ('text') should be a string.")
	if not isinstance(tab_width, int):
		raise TypeError("The second parameter of _from_spaces ('tab_width') should be an integer.")
	if tab_width < 2:
		raise ValueError("The second parameter of _from_spaces ('tab_width') should be 2 or greater.")

	# '\r's before '\n's are just left at the end of lines
	# solitary '\r's aren't dealt with as these days no one uses CRs on their own for new lines
	lines = _get_positions_contents(text, tab_width)
	max_cells = max([len(line) for line in lines])
	nof_lines = len(lines)

	# not a "for cell_num in (range(max_cells)):" loop because max_cells may increase
	cell_num = 0
	while cell_num < max_cells:
		starting_new_block = True
		start_range = 0
		end_range = 0
		for line_num in range(nof_lines + 1):
			if _cell_exists(lines, line_num, cell_num):
				# if we're at the start of a block remember what line we're on
				if starting_new_block:
					start_range = line_num
					starting_new_block = False
				end_range = line_num
			# if there's no cell and we're not starting a block then we're at the end of a column block
			elif not starting_new_block:
				block_positions = [lines[block_line_num][cell_num].position for block_line_num in range(start_range, end_range + 1)]
				min_indent = min(block_positions)
				for block_line_offset, block_position in enumerate(block_positions):
					block_line_num = start_range + block_line_offset
					# if the current block is to the right we need to insert an empty cell
					if block_position > min_indent:
						# insert an empty cell to shift existing cells across
						lines[block_line_num].insert(cell_num, PositionedText('', 0))
						max_cells = max(max_cells, len(lines[block_line_num]))
					# otherwise if we're in the first column we need to insert empty cells for every line in this block
					elif cell_num == 0:
						nof_cells_missing = int(block_position / tab_width)
						for _ in range(nof_cells_missing):
							# insert empty indentation cells
							lines[block_line_num].insert(cell_num, PositionedText('', 0))
							max_cells = max(max_cells, len(lines[block_line_num]))
				starting_new_block = True
		cell_num += 1
	return [([cell.text for cell in line] or ['']) for line in lines]


def _to_elastic_tabstops(table):
	"""Convert table to elastic tabstops aligned text."""
	if not isinstance(table, list):
		raise TypeError("The first parameter of _to_elastic_tabstops ('table') should be a list.")

	return '\n'.join(['\t'.join(row) for row in table])


def _to_fixed_tabstops(table, tab_width):
	"""Convert table to fixed tabstops aligned text."""
	if not isinstance(table, list):
		raise TypeError("The first parameter of _to_fixed_tabstops ('table') should be a list.")
	if not isinstance(tab_width, int):
		# BUGFIX: removed the stray space before the period in this message
		raise TypeError("The second parameter of _to_fixed_tabstops ('tab_width') should be an integer.")
	if tab_width < 2:
		raise ValueError("The second parameter of _to_fixed_tabstops ('tab_width') should be 2 or greater.")

	# first lay the table out with cell widths rounded to multiples of tab_width,
	# then re-derive each cell's column and emit tabs (plus any leftover spaces) to reach it
	spaced_text = _to_spaces(table, tab_width, multiples_of_tab_width=True)
	lines = _get_positions_contents(spaced_text, tab_width)
	tabbed_text = []
	for line in lines:
		pos = 0
		tabbed_line = ''
		for cell in line:
			gap = cell.position - pos
			num_tabs = int(math.floor((gap + (tab_width - 1)) / tab_width))
			num_spaces = cell.position % tab_width
			tabbed_line += ('\t' * num_tabs) + (' ' * num_spaces) + cell.text
			pos = cell.position + len(cell.text)
		tabbed_text.append(tabbed_line)
	return '\n'.join(tabbed_text)


def _from_fixed_tabstops(text, tab_width):
	"""Convert fixed tabstops aligned text to table."""
	if not isinstance(text, str):
		raise TypeError("The first parameter of _from_fixed_tabstops ('text') should be a string.")
	if not isinstance(tab_width, int):
		raise TypeError("The second parameter of _from_fixed_tabstops ('tab_width') should be an integer.")
	if tab_width < 2:
		raise ValueError("The second parameter of _from_fixed_tabstops ('tab_width') should be 2 or greater.")

	expanded = text.expandtabs(tab_width)
	return _from_spaces(expanded, tab_width)


def _from_elastic_tabstops(text):
	"""Convert elastic tabstops aligned text to table."""
	if not isinstance(text, str):
		raise TypeError("The first parameter of _from_elastic_tabstops ('text') should be a string.")

	# '\r's before '\n's are just left at the end of lines
	# solitary '\r's aren't dealt with as these days no one uses CRs on their own for new lines
	return [line.split('\t') for line in text.split('\n')]


def _to_spaces(table, tab_width, multiples_of_tab_width=False):
	"""Convert table to spaces aligned text."""
	if not isinstance(table, list):
		raise TypeError("The first parameter of _to_spaces ('table') should be a list.")
	if not isinstance(tab_width, int):
		# BUGFIX: removed the stray space before the period in this message
		raise TypeError("The second parameter of _to_spaces ('tab_width') should be an integer.")
	if tab_width < 2:
		raise ValueError("The second parameter of _to_spaces ('tab_width') should be 2 or greater.")

	lines = [[SizedText(cell, tab_width, multiples_of_tab_width) for cell in row] for row in table]
	max_cells = max([len(line) for line in lines])
	nof_lines = len(lines)

	for cell_num in range(max_cells):
		starting_new_block = True
		start_range = 0
		end_range = 0
		max_width = 0
		for line_num in range(nof_lines):
			# check if there's a cell to the right of this column (which means this cell ends in a tab) - we only care about terminated cells
			if _cell_exists(lines, line_num, cell_num + 1):
				# if we're at the start of a block remember what line we're on
				if starting_new_block:
					start_range = line_num
					starting_new_block = False
				# record the max width of the block so far
				max_width = max(max_width, lines[line_num][cell_num].size)
				end_range = line_num
			# if the cell has not been terminated and we're not starting a block then we're at the end of a column block
			elif not starting_new_block:
				# iterate over all cells in the block and set their width to the max width
				for block_line_num in range(start_range, end_range + 1):
					lines[block_line_num][cell_num].size = max_width
				starting_new_block = True
				max_width = 0
		# if we got to the last line without setting the size of the current block, do that now
		if not starting_new_block:
			for block_line_num in range(start_range, end_range + 1):
				lines[block_line_num][cell_num].size = max_width

	# append text and spaces to new_text
	new_text = [''] * nof_lines
	for line_num in range(nof_lines):
		if len(lines[line_num]) > 0:
			for cell_num in range(len(lines[line_num]) - 1):
				new_text[line_num] += lines[line_num][cell_num].get_padded_text()
			last_cell_num = len(lines[line_num]) - 1
			new_text[line_num] += lines[line_num][last_cell_num].text
	return '\n'.join(new_text)
PypiClean
/Kamaelia-0.6.0.tar.gz/Kamaelia-0.6.0/Tools/Whiteboard/Whiteboard.py
import os import sys import Axon import pygame from Axon.Component import component from Axon.Ipc import WaitComplete, producerFinished, shutdownMicroprocess from Kamaelia.Chassis.Graphline import Graphline from Kamaelia.Chassis.Pipeline import Pipeline from Kamaelia.Chassis.ConnectedServer import SimpleServer from Kamaelia.Internet.TCPClient import TCPClient from Kamaelia.Util.Console import ConsoleEchoer from Kamaelia.Visualisation.PhysicsGraph.chunks_to_lines import chunks_to_lines from Kamaelia.Visualisation.PhysicsGraph.lines_to_tokenlists import lines_to_tokenlists as text_to_tokenlists from Kamaelia.Util.NullSink import nullSinkComponent from Kamaelia.Util.Backplane import Backplane, PublishTo, SubscribeTo from Kamaelia.Util.Detuple import SimpleDetupler from Kamaelia.Util.Console import ConsoleEchoer # # The following application specific components will probably be rolled # back into the repository. # from Kamaelia.Apps.Whiteboard.TagFiltering import TagAndFilterWrapper, FilterAndTagWrapper from Kamaelia.Apps.Whiteboard.TagFiltering import TagAndFilterWrapperKeepingTag, FilterAndTagWrapperKeepingTag from Kamaelia.Apps.Whiteboard.Tokenisation import tokenlists_to_lines, lines_to_tokenlists from Kamaelia.Apps.Whiteboard.Canvas import Canvas from Kamaelia.Apps.Whiteboard.Painter import Painter from Kamaelia.Apps.Whiteboard.SingleShot import OneShot from Kamaelia.Apps.Whiteboard.CheckpointSequencer import CheckpointSequencer from Kamaelia.Apps.Whiteboard.Entuple import Entuple from Kamaelia.Apps.Whiteboard.Routers import Router, TwoWaySplitter, ConditionalSplitter from Kamaelia.Apps.Whiteboard.Palette import buildPalette, colours from Kamaelia.Apps.Whiteboard.Options import parseOptions from Kamaelia.Apps.Whiteboard.UI import PagingControls, LocalPagingControls, Eraser, ClearPage from Kamaelia.Apps.Whiteboard.CommandConsole import CommandConsole try: from Kamaelia.Codec.Speex import SpeexEncode,SpeexDecode except Exception, e: print "Speex not available, using 
null components instead" SpeexEncode = nullSinkComponent SpeexDecode = nullSinkComponent try: from Kamaelia.Apps.Whiteboard.Audio import SoundInput except ImportError: print "SoundInput not available, using NullSink instead" SoundInput = nullSinkComponent try: from Kamaelia.Apps.Whiteboard.Audio import SoundOutput except ImportError: print "SoundOutput not available, using NullSink instead" SoundOutput = nullSinkComponent try: from Kamaelia.Apps.Whiteboard.Audio import RawAudioMixer except ImportError: print "RawAudioMixer not available, using NullSink instead" RawAudioMixer = nullSinkComponent notepad = None if len(sys.argv) >1: if os.path.exists(sys.argv[1]): if os.path.isdir(sys.argv[1]): notepad = sys.argv[1] if (notepad is None) and os.path.exists("Scribbles"): if os.path.isdir("Scribbles"): notepad = "Scribbles" if (notepad is None): N = os.path.join(os.path.expanduser("~"),"Scribbles") if not os.path.exists(N): os.makedirs(N) if os.path.isdir(N): notepad = N if (notepad is None): print "Can't figure out what to do with piccies. Exitting" sys.exit(0) # # Misplaced encapsulation --> Kamaelia.Apps.Whiteboard.Palette # colours_order = [ "black", "red", "orange", "yellow", "green", "turquoise", "blue", "purple", "darkgrey", "lightgrey" ] num_pages = len(os.listdir(notepad)) def FilteringPubsubBackplane(backplaneID,**FilterTagWrapperOptions): """Sends tagged events to a backplane. Emits events not tagged by this pubsub.""" return FilterAndTagWrapper( Pipeline( PublishTo(backplaneID), # well, should be to separate pipelines, this is lazier! 
SubscribeTo(backplaneID), ), **FilterTagWrapperOptions ) def clientconnector(whiteboardBackplane="WHITEBOARD", audioBackplane="AUDIO", port=1500): return Pipeline( chunks_to_lines(), lines_to_tokenlists(), Graphline( ROUTER = Router( ((lambda T : T[0]=="SOUND"), "audio"), ((lambda T : T[0]!="SOUND"), "whiteboard"), ), WHITEBOARD = FilteringPubsubBackplane(whiteboardBackplane), AUDIO = Pipeline( SimpleDetupler(1), # remove 'SOUND' tag SpeexDecode(3), FilteringPubsubBackplane(audioBackplane, dontRemoveTag=True), RawAudioMixer(), SpeexEncode(3), Entuple(prefix=["SOUND"],postfix=[]), ), linkages = { # incoming messages go to a router ("", "inbox") : ("ROUTER", "inbox"), # distribute messages to appropriate destinations ("ROUTER", "audio") : ("AUDIO", "inbox"), ("ROUTER", "whiteboard") : ("WHITEBOARD", "inbox"), # aggregate all output ("AUDIO", "outbox") : ("", "outbox"), ("WHITEBOARD", "outbox") : ("", "outbox"), # shutdown routing, not sure if this will actually work, but hey! ("", "control") : ("ROUTER", "control"), ("ROUTER", "signal") : ("AUDIO", "control"), ("AUDIO", "signal") : ("WHITEBOARD", "control"), ("WHITEBOARD", "signal") : ("", "signal") }, ), tokenlists_to_lines(), ) #/------------------------------------------------------------------------- # Server side of the system # def LocalEventServer(whiteboardBackplane="WHITEBOARD", audioBackplane="AUDIO", port=1500): def configuredClientConnector(): return clientconnector(whiteboardBackplane=whiteboardBackplane, audioBackplane=audioBackplane, port=port) return SimpleServer(protocol=clientconnector, port=port) #/------------------------------------------------------------------------- # Client side of the system # def EventServerClients(rhost, rport, whiteboardBackplane="WHITEBOARD", audioBackplane="AUDIO"): # plug a TCPClient into the backplane loadingmsg = "Fetching sketch from server..." 
return Graphline( # initial messages sent to the server, and the local whiteboard GETIMG = Pipeline( OneShot(msg=[["GETIMG"]]), tokenlists_to_lines() ), BLACKOUT = OneShot(msg="CLEAR 0 0 0\r\n" "WRITE 100 100 24 255 255 255 "+loadingmsg+"\r\n"), NETWORK = TCPClient(host=rhost,port=rport), APPCOMMS = clientconnector(whiteboardBackplane=whiteboardBackplane, audioBackplane=audioBackplane), linkages = { ("GETIMG", "outbox") : ("NETWORK", "inbox"), # Single shot out ("APPCOMMS", "outbox") : ("NETWORK", "inbox"), # Continuous out ("BLACKOUT", "outbox") : ("APPCOMMS", "inbox"), # Single shot in ("NETWORK", "outbox") : ("APPCOMMS", "inbox"), # Continuous in } ) #/------------------------------------------------------------------------- class LocalPageEventsFilter(ConditionalSplitter): # This is a data tap/siphon/demuxer def condition(self, data): return (data == [["prev"]]) or (data == [["next"]]) def true(self,data): self.send((data[0][0], "local"), "true") SLIDESPEC = notepad+"/slide.%d.png" def makeBasicSketcher(left=0,top=0,width=1024,height=768): return Graphline( CANVAS = Canvas( position=(left,top+32),size=(width,height-32) ), PAINTER = Painter(), PALETTE = buildPalette( cols=colours, order=colours_order, topleft=(left+64,top), size=32 ), ERASER = Eraser(left,top), CLEAR = ClearPage(left+(64*5)+32*len(colours),top), PAGINGCONTROLS = PagingControls(left+64+32*len(colours),top), LOCALPAGINGCONTROLS = LocalPagingControls(left+(64*6)+32*len(colours),top), LOCALPAGEEVENTS = LocalPageEventsFilter(), HISTORY = CheckpointSequencer(lambda X: [["LOAD", SLIDESPEC % (X,)]], lambda X: [["SAVE", SLIDESPEC % (X,)]], lambda X: [["CLEAR"]], initial = 1, highest = num_pages, ), PAINT_SPLITTER = TwoWaySplitter(), LOCALEVENT_SPLITTER = TwoWaySplitter(), DEBUG = ConsoleEchoer(), linkages = { ("CANVAS", "eventsOut") : ("PAINTER", "inbox"), ("PALETTE", "outbox") : ("PAINTER", "colour"), ("ERASER", "outbox") : ("PAINTER", "erase"), ("PAINTER", "outbox") : ("PAINT_SPLITTER", "inbox"), 
("CLEAR","outbox") : ("PAINT_SPLITTER", "inbox"), ("PAINT_SPLITTER", "outbox") : ("CANVAS", "inbox"), ("PAINT_SPLITTER", "outbox2") : ("", "outbox"), # send to network ("LOCALPAGINGCONTROLS","outbox") : ("LOCALEVENT_SPLITTER", "inbox"), ("LOCALEVENT_SPLITTER", "outbox2"): ("", "outbox"), # send to network ("LOCALEVENT_SPLITTER", "outbox") : ("LOCALPAGEEVENTS", "inbox"), ("", "inbox") : ("LOCALPAGEEVENTS", "inbox"), ("LOCALPAGEEVENTS", "false") : ("CANVAS", "inbox"), ("LOCALPAGEEVENTS", "true") : ("HISTORY", "inbox"), ("PAGINGCONTROLS","outbox") : ("HISTORY", "inbox"), ("HISTORY","outbox") : ("CANVAS", "inbox"), ("CANVAS", "outbox") : ("", "outbox"), ("CANVAS","surfacechanged") : ("HISTORY", "inbox"), }, ) if __name__=="__main__": mainsketcher = \ Graphline( SKETCHER = makeBasicSketcher(width=1024,height=768), CONSOLE = CommandConsole(), linkages = { ('','inbox'):('SKETCHER','inbox'), ('SKETCHER','outbox'):('','outbox'), ('CONSOLE','outbox'):('SKETCHER','inbox'), } ) # primary whiteboard Pipeline( SubscribeTo("WHITEBOARD"), TagAndFilterWrapper(mainsketcher), PublishTo("WHITEBOARD") ).activate() # primary sound IO - tagged and filtered, so can't hear self Pipeline( SubscribeTo("AUDIO"), TagAndFilterWrapperKeepingTag( Pipeline( RawAudioMixer(), SoundOutput(), ###### SoundInput(), ), ), PublishTo("AUDIO"), ).activate() rhost, rport, serveport = parseOptions() # setup a server, if requested if serveport: LocalEventServer("WHITEBOARD", "AUDIO", port=serveport).activate() # connect to remote host & port, if requested if rhost and rport: EventServerClients(rhost, rport, "WHITEBOARD", "AUDIO").activate() # sys.path.append("../Introspection") # from Profiling import FormattedProfiler # # Pipeline(FormattedProfiler( 20.0, 1.0), # ConsoleEchoer() # ).activate() Backplane("WHITEBOARD").activate() Backplane("AUDIO").run()
PypiClean
/AutoMonkey-0.2.1.tar.gz/AutoMonkey-0.2.1/automonkey/app_funcs.py
from time import sleep
from sys import platform

if platform == "win32":
    from os import startfile  # It is used
elif platform == "linux":
    from subprocess import call

    def startfile(file):
        call(["xdg-open", file])

from os import system
from os.path import isfile
from re import search
from subprocess import Popen

from win32con import WM_CLOSE
from win32con import SW_RESTORE
from win32con import SW_MINIMIZE
from win32con import SW_MAXIMIZE
from win32gui import IsIconic
from win32gui import ShowWindow
from win32gui import FindWindow
from win32gui import PostMessage
from win32gui import EnumWindows
from win32gui import GetWindowText
from win32gui import SetForegroundWindow

from pyautogui import keyUp
from pyautogui import keyDown
from pyautogui import hotkey as keys2  # this is best solution, pass list to be unpacked with *list

from .utils import copy


def msoffice_replace(replace_this: str, with_this: str, match_case: bool = True,
                     whole_words: bool = True, delay_factor: float = 1):
    """Search and replace in all MS Office Software. No Guarantees.

    Drives the Find & Replace dialog with keystrokes, so an Office window must
    already have keyboard focus when this is called.
    NOTE(review): the Alt-key mnemonics below are assumed from the English
    Office Find & Replace dialog — verify for other locales.

    Args:
        replace_this (str): string to be replaced
        with_this (str): new string
        match_case (bool, optional): toggle the "match case" option. Defaults to True.
        whole_words (bool, optional): toggle the "whole words only" option. Defaults to True.
        delay_factor (float, optional): Delay factor in case the default sleep
            times for waiting that the replacement is finished are too fast.
            Defaults to 1.
    """
    copy(replace_this)
    sleep(0.2 * delay_factor)
    keys2('ctrl', 'h')  # open the Find & Replace dialog
    sleep(0.2 * delay_factor)
    if match_case or whole_words:
        keys2('alt', 'm')  # presumably expands the extra options — confirm
        sleep(0.2 * delay_factor)
    if match_case:
        keys2('alt', 'h')
        sleep(0.2 * delay_factor)
    if whole_words:
        keys2('alt', 'y')
        sleep(0.2 * delay_factor)
    keys2('alt', 'n')
    sleep(0.2 * delay_factor)
    keys2('ctrl', 'v')  # paste the search text
    sleep(0.2 * delay_factor)
    copy(with_this)
    sleep(0.2 * delay_factor)
    keys2('alt', 'i')
    sleep(0.2 * delay_factor)
    keys2('ctrl', 'v')  # paste the replacement text
    sleep(0.2 * delay_factor)
    keys2('alt', 'a')
    sleep(0.2 * delay_factor)
    keys2('enter')
    sleep(0.2 * delay_factor)
    keys2('enter')
    sleep(0.2 * delay_factor)
    keys2('esc')
    sleep(0.2 * delay_factor)
    keys2('esc')
    sleep(0.2 * delay_factor)


class WindowManager:
    """Window Manager: locates a top-level window and manipulates it."""

    def __init__(self):
        # handle of the most recently located window (None until a lookup)
        self._handle = None

    def get_window_by_class(self, class_name, window_name=None):
        """Find a window by the class name"""
        self._handle = FindWindow(class_name, window_name)

    def _parse_windows(self, hwnd, pattern):
        """Pass to EnumWindows() to check all opened windows"""
        if search(pattern, str(GetWindowText(hwnd))) is not None:
            self._handle = hwnd

    def get_window_by_title(self, pattern):
        """Find a window whose title matches the given regex pattern"""
        self._handle = None
        EnumWindows(self._parse_windows, pattern)

    def focus(self):
        """Bring focus to the selected window"""
        # SetForegroundWindow works well only after pressing alt
        keyDown('alt')
        SetForegroundWindow(self._handle)
        self.restore()
        keyUp('alt')

    def minimize(self):
        """Minimize the selected window (skipped when it is already iconic)."""
        if not IsIconic(self._handle):
            ShowWindow(self._handle, SW_MINIMIZE)

    def restore(self):
        """Restore the selected window when it is iconic."""
        if IsIconic(self._handle):
            ShowWindow(self._handle, SW_RESTORE)

    def maximize(self):
        """Maximize the selected window."""
        ShowWindow(self._handle, SW_MAXIMIZE)

    def close(self):
        """Ask the selected window to close by posting WM_CLOSE."""
        PostMessage(self._handle, WM_CLOSE, 0, 0)


def close(title: str):
    """Close the first window whose title contains ``title``."""
    manager = WindowManager()
    manager.get_window_by_title(f".*?{title}.*?")
    manager.close()


def minimize(title: str):
    """Minimize the first window whose title contains ``title``."""
    manager = WindowManager()
    manager.get_window_by_title(f".*?{title}.*?")
    manager.minimize()


def maximize(title: str):
    """Maximize the first window whose title contains ``title``."""
    manager = WindowManager()
    manager.get_window_by_title(f".*?{title}.*?")
    manager.maximize()


def restore(title: str):
    """Restore the first window whose title contains ``title``."""
    manager = WindowManager()
    manager.get_window_by_title(f".*?{title}.*?")
    manager.restore()


def focus(title: str):
    """Bring focus to the first window whose title contains ``title``."""
    manager = WindowManager()
    manager.get_window_by_title(f".*?{title}.*?")
    manager.focus()


def open_app(app: str):
    """Open an application.

    Args:
        app (str): path to an executable file, or a name resolvable by the
            shell's ``start`` command.
    """
    if isfile(app):
        Popen(app)
    else:
        try:
            system(f"start {app}")
        except Exception as err:
            raise Exception(f"Could not open {app} because of {err}") from err
PypiClean
/BlueWhale3-3.31.3.tar.gz/BlueWhale3-3.31.3/doc/visual-programming/build/htmlhelp/_static/searchtools.js
if (!Scorer) { /** * Simple result scoring code. */ var Scorer = { // Implement the following function to further tweak the score for each result // The function takes a result array [filename, title, anchor, descr, score] // and returns the new score. /* score: function(result) { return result[4]; }, */ // query matches the full name of an object objNameMatch: 11, // or matches in the last dotted part of the object name objPartialMatch: 6, // Additive scores depending on the priority of the object objPrio: {0: 15, // used to be importantResults 1: 5, // used to be objectResults 2: -5}, // used to be unimportantResults // Used when the priority is not in the mapping. objPrioDefault: 0, // query found in title title: 15, partialTitle: 7, // query found in terms term: 5, partialTerm: 2 }; } if (!splitQuery) { function splitQuery(query) { return query.split(/\s+/); } } /** * Search Module */ var Search = { _index : null, _queued_query : null, _pulse_status : -1, htmlToText : function(htmlString) { var virtualDocument = document.implementation.createHTMLDocument('virtual'); var htmlElement = $(htmlString, virtualDocument); htmlElement.find('.headerlink').remove(); docContent = htmlElement.find('[role=main]')[0]; if(docContent === undefined) { console.warn("Content block not found. Sphinx search tries to obtain it " + "via '[role=main]'. 
Could you check your theme or template."); return ""; } return docContent.textContent || docContent.innerText; }, init : function() { var params = $.getQueryParameters(); if (params.q) { var query = params.q[0]; $('input[name="q"]')[0].value = query; this.performSearch(query); } }, loadIndex : function(url) { $.ajax({type: "GET", url: url, data: null, dataType: "script", cache: true, complete: function(jqxhr, textstatus) { if (textstatus != "success") { document.getElementById("searchindexloader").src = url; } }}); }, setIndex : function(index) { var q; this._index = index; if ((q = this._queued_query) !== null) { this._queued_query = null; Search.query(q); } }, hasIndex : function() { return this._index !== null; }, deferQuery : function(query) { this._queued_query = query; }, stopPulse : function() { this._pulse_status = 0; }, startPulse : function() { if (this._pulse_status >= 0) return; function pulse() { var i; Search._pulse_status = (Search._pulse_status + 1) % 4; var dotString = ''; for (i = 0; i < Search._pulse_status; i++) dotString += '.'; Search.dots.text(dotString); if (Search._pulse_status > -1) window.setTimeout(pulse, 500); } pulse(); }, /** * perform a search for something (or wait until index is loaded) */ performSearch : function(query) { // create the required interface elements this.out = $('#search-results'); this.title = $('<h2>' + _('Searching') + '</h2>').appendTo(this.out); this.dots = $('<span></span>').appendTo(this.title); this.status = $('<p class="search-summary">&nbsp;</p>').appendTo(this.out); this.output = $('<ul class="search"/>').appendTo(this.out); $('#search-progress').text(_('Preparing search...')); this.startPulse(); // index already loaded, the browser was quick! 
if (this.hasIndex()) this.query(query); else this.deferQuery(query); }, /** * execute search (requires search index to be loaded) */ query : function(query) { var i; // stem the searchterms and add them to the correct list var stemmer = new Stemmer(); var searchterms = []; var excluded = []; var hlterms = []; var tmp = splitQuery(query); var objectterms = []; for (i = 0; i < tmp.length; i++) { if (tmp[i] !== "") { objectterms.push(tmp[i].toLowerCase()); } if ($u.indexOf(stopwords, tmp[i].toLowerCase()) != -1 || tmp[i] === "") { // skip this "word" continue; } // stem the word var word = stemmer.stemWord(tmp[i].toLowerCase()); // prevent stemmer from cutting word smaller than two chars if(word.length < 3 && tmp[i].length >= 3) { word = tmp[i]; } var toAppend; // select the correct list if (word[0] == '-') { toAppend = excluded; word = word.substr(1); } else { toAppend = searchterms; hlterms.push(tmp[i].toLowerCase()); } // only add if not already in the list if (!$u.contains(toAppend, word)) toAppend.push(word); } var highlightstring = '?highlight=' + $.urlencode(hlterms.join(" ")); // console.debug('SEARCH: searching for:'); // console.info('required: ', searchterms); // console.info('excluded: ', excluded); // prepare search var terms = this._index.terms; var titleterms = this._index.titleterms; // array of [filename, title, anchor, descr, score] var results = []; $('#search-progress').empty(); // lookup as object for (i = 0; i < objectterms.length; i++) { var others = [].concat(objectterms.slice(0, i), objectterms.slice(i+1, objectterms.length)); results = results.concat(this.performObjectSearch(objectterms[i], others)); } // lookup as search terms in fulltext results = results.concat(this.performTermsSearch(searchterms, excluded, terms, titleterms)); // let the scorer override scores with a custom scoring function if (Scorer.score) { for (i = 0; i < results.length; i++) results[i][4] = Scorer.score(results[i]); } // now sort the results by score (in opposite 
order of appearance, since the // display function below uses pop() to retrieve items) and then // alphabetically results.sort(function(a, b) { var left = a[4]; var right = b[4]; if (left > right) { return 1; } else if (left < right) { return -1; } else { // same score: sort alphabetically left = a[1].toLowerCase(); right = b[1].toLowerCase(); return (left > right) ? -1 : ((left < right) ? 1 : 0); } }); // for debugging //Search.lastresults = results.slice(); // a copy //console.info('search results:', Search.lastresults); // print the results var resultCount = results.length; function displayNextItem() { // results left, load the summary and display it if (results.length) { var item = results.pop(); var listItem = $('<li></li>'); var requestUrl = ""; var linkUrl = ""; if (DOCUMENTATION_OPTIONS.BUILDER === 'dirhtml') { // dirhtml builder var dirname = item[0] + '/'; if (dirname.match(/\/index\/$/)) { dirname = dirname.substring(0, dirname.length-6); } else if (dirname == 'index/') { dirname = ''; } requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + dirname; linkUrl = requestUrl; } else { // normal html builders requestUrl = DOCUMENTATION_OPTIONS.URL_ROOT + item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX; linkUrl = item[0] + DOCUMENTATION_OPTIONS.LINK_SUFFIX; } listItem.append($('<a/>').attr('href', linkUrl + highlightstring + item[2]).html(item[1])); if (item[3]) { listItem.append($('<span> (' + item[3] + ')</span>')); Search.output.append(listItem); setTimeout(function() { displayNextItem(); }, 5); } else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) { $.ajax({url: requestUrl, dataType: "text", complete: function(jqxhr, textstatus) { var data = jqxhr.responseText; if (data !== '' && data !== undefined) { var summary = Search.makeSearchSummary(data, searchterms, hlterms); if (summary) { listItem.append(summary); } } Search.output.append(listItem); setTimeout(function() { displayNextItem(); }, 5); }}); } else { // no source available, just display title 
Search.output.append(listItem); setTimeout(function() { displayNextItem(); }, 5); } } // search finished, update title and status message else { Search.stopPulse(); Search.title.text(_('Search Results')); if (!resultCount) Search.status.text(_('Your search did not match any documents. Please make sure that all words are spelled correctly and that you\'ve selected enough categories.')); else Search.status.text(_('Search finished, found %s page(s) matching the search query.').replace('%s', resultCount)); Search.status.fadeIn(500); } } displayNextItem(); }, /** * search for object names */ performObjectSearch : function(object, otherterms) { var filenames = this._index.filenames; var docnames = this._index.docnames; var objects = this._index.objects; var objnames = this._index.objnames; var titles = this._index.titles; var i; var results = []; for (var prefix in objects) { for (var iMatch = 0; iMatch != objects[prefix].length; ++iMatch) { var match = objects[prefix][iMatch]; var name = match[4]; var fullname = (prefix ? prefix + '.' : '') + name; var fullnameLower = fullname.toLowerCase() if (fullnameLower.indexOf(object) > -1) { var score = 0; var parts = fullnameLower.split('.'); // check for different match types: exact matches of full name or // "last name" (i.e. 
last dotted part) if (fullnameLower == object || parts[parts.length - 1] == object) { score += Scorer.objNameMatch; // matches in last name } else if (parts[parts.length - 1].indexOf(object) > -1) { score += Scorer.objPartialMatch; } var objname = objnames[match[1]][2]; var title = titles[match[0]]; // If more than one term searched for, we require other words to be // found in the name/title/description if (otherterms.length > 0) { var haystack = (prefix + ' ' + name + ' ' + objname + ' ' + title).toLowerCase(); var allfound = true; for (i = 0; i < otherterms.length; i++) { if (haystack.indexOf(otherterms[i]) == -1) { allfound = false; break; } } if (!allfound) { continue; } } var descr = objname + _(', in ') + title; var anchor = match[3]; if (anchor === '') anchor = fullname; else if (anchor == '-') anchor = objnames[match[1]][1] + '-' + fullname; // add custom score for some objects according to scorer if (Scorer.objPrio.hasOwnProperty(match[2])) { score += Scorer.objPrio[match[2]]; } else { score += Scorer.objPrioDefault; } results.push([docnames[match[0]], fullname, '#'+anchor, descr, score, filenames[match[0]]]); } } } return results; }, /** * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions */ escapeRegExp : function(string) { return string.replace(/[.*+\-?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string }, /** * search for full-text terms in the index */ performTermsSearch : function(searchterms, excluded, terms, titleterms) { var docnames = this._index.docnames; var filenames = this._index.filenames; var titles = this._index.titles; var i, j, file; var fileMap = {}; var scoreMap = {}; var results = []; // perform the search on the required terms for (i = 0; i < searchterms.length; i++) { var word = searchterms[i]; var files = []; var _o = [ {files: terms[word], score: Scorer.term}, {files: titleterms[word], score: Scorer.title} ]; // add support for partial matches if (word.length > 2) { var word_regex = 
this.escapeRegExp(word); for (var w in terms) { if (w.match(word_regex) && !terms[word]) { _o.push({files: terms[w], score: Scorer.partialTerm}) } } for (var w in titleterms) { if (w.match(word_regex) && !titleterms[word]) { _o.push({files: titleterms[w], score: Scorer.partialTitle}) } } } // no match but word was a required one if ($u.every(_o, function(o){return o.files === undefined;})) { break; } // found search word in contents $u.each(_o, function(o) { var _files = o.files; if (_files === undefined) return if (_files.length === undefined) _files = [_files]; files = files.concat(_files); // set score for the word in each file to Scorer.term for (j = 0; j < _files.length; j++) { file = _files[j]; if (!(file in scoreMap)) scoreMap[file] = {}; scoreMap[file][word] = o.score; } }); // create the mapping for (j = 0; j < files.length; j++) { file = files[j]; if (file in fileMap && fileMap[file].indexOf(word) === -1) fileMap[file].push(word); else fileMap[file] = [word]; } } // now check if the files don't contain excluded terms for (file in fileMap) { var valid = true; // check if all requirements are matched var filteredTermCount = // as search terms with length < 3 are discarded: ignore searchterms.filter(function(term){return term.length > 2}).length if ( fileMap[file].length != searchterms.length && fileMap[file].length != filteredTermCount ) continue; // ensure that none of the excluded terms is in the search result for (i = 0; i < excluded.length; i++) { if (terms[excluded[i]] == file || titleterms[excluded[i]] == file || $u.contains(terms[excluded[i]] || [], file) || $u.contains(titleterms[excluded[i]] || [], file)) { valid = false; break; } } // if we have still a valid result we can add it to the result list if (valid) { // select one (max) score for the file. // for better ranking, we should calculate ranking by using words statistics like basic tf-idf... 
var score = $u.max($u.map(fileMap[file], function(w){return scoreMap[file][w]})); results.push([docnames[file], titles[file], '', null, score, filenames[file]]); } } return results; }, /** * helper function to return a node containing the * search summary for a given text. keywords is a list * of stemmed words, hlwords is the list of normal, unstemmed * words. the first one is used to find the occurrence, the * latter for highlighting it. */ makeSearchSummary : function(htmlText, keywords, hlwords) { var text = Search.htmlToText(htmlText); if (text == "") { return null; } var textLower = text.toLowerCase(); var start = 0; $.each(keywords, function() { var i = textLower.indexOf(this.toLowerCase()); if (i > -1) start = i; }); start = Math.max(start - 120, 0); var excerpt = ((start > 0) ? '...' : '') + $.trim(text.substr(start, 240)) + ((start + 240 - text.length) ? '...' : ''); var rv = $('<p class="context"></p>').text(excerpt); $.each(hlwords, function() { rv = rv.highlightText(this, 'highlighted'); }); return rv; } }; $(document).ready(function() { Search.init(); });
PypiClean
/Flask%20of%20Cinema-1.0.0.tar.gz/Flask of Cinema-1.0.0/static/js/waves.js
;(function(window) {
    'use strict';

    // Waves: material-design "ripple" click effect library (vendored).
    // Attaches delegated mousedown/touchstart handlers and animates a
    // .waves-ripple <div> inside any element carrying .waves-effect.
    var Waves = Waves || {};
    var $$ = document.querySelectorAll.bind(document);

    // Find exact position of element
    function isWindow(obj) {
        return obj !== null && obj === obj.window;
    }

    // Returns the window object owning `elem` (elem itself if it is a
    // window, or the defaultView of a document node).
    function getWindow(elem) {
        return isWindow(elem) ? elem : elem.nodeType === 9 && elem.defaultView;
    }

    // Document-relative top/left of an element, scroll-adjusted.
    function offset(elem) {
        var docElem, win,
            box = {top: 0, left: 0},
            doc = elem && elem.ownerDocument;

        docElem = doc.documentElement;

        if (typeof elem.getBoundingClientRect !== typeof undefined) {
            box = elem.getBoundingClientRect();
        }
        win = getWindow(doc);
        return {
            top: box.top + win.pageYOffset - docElem.clientTop,
            left: box.left + win.pageXOffset - docElem.clientLeft
        };
    }

    // Serialize a {property: value} map into an inline-style string.
    function convertStyle(obj) {
        var style = '';
        for (var a in obj) {
            if (obj.hasOwnProperty(a)) {
                style += (a + ':' + obj[a] + ';');
            }
        }
        return style;
    }

    var Effect = {

        // Effect delay (ripple transition duration, ms)
        duration: 750,

        // Create and grow a ripple inside `element` at the event position.
        show: function(e, element) {

            // Disable right click
            if (e.button === 2) {
                return false;
            }

            var el = element || this;

            // Create ripple
            var ripple = document.createElement('div');
            ripple.className = 'waves-ripple';
            el.appendChild(ripple);

            // Get click coordinate and element width
            var pos = offset(el);
            var relativeY = (e.pageY - pos.top);
            var relativeX = (e.pageX - pos.left);
            var scale = 'scale('+((el.clientWidth / 100) * 10)+')';

            // Support for touch devices
            if ('touches' in e) {
                relativeY = (e.touches[0].pageY - pos.top);
                relativeX = (e.touches[0].pageX - pos.left);
            }

            // Attach data to element (read back by hide() for the fade-out)
            ripple.setAttribute('data-hold', Date.now());
            ripple.setAttribute('data-scale', scale);
            ripple.setAttribute('data-x', relativeX);
            ripple.setAttribute('data-y', relativeY);

            // Set ripple position
            var rippleStyle = {
                'top': relativeY+'px',
                'left': relativeX+'px'
            };

            // Apply position without animating, then re-enable transitions
            ripple.className = ripple.className + ' waves-notransition';
            ripple.setAttribute('style', convertStyle(rippleStyle));
            ripple.className = ripple.className.replace('waves-notransition', '');

            // Scale the ripple
            rippleStyle['-webkit-transform'] = scale;
            rippleStyle['-moz-transform'] = scale;
            rippleStyle['-ms-transform'] = scale;
            rippleStyle['-o-transform'] = scale;
            rippleStyle.transform = scale;
            rippleStyle.opacity = '1';

            rippleStyle['-webkit-transition-duration'] = Effect.duration + 'ms';
            rippleStyle['-moz-transition-duration'] = Effect.duration + 'ms';
            rippleStyle['-o-transition-duration'] = Effect.duration + 'ms';
            rippleStyle['transition-duration'] = Effect.duration + 'ms';

            rippleStyle['-webkit-transition-timing-function'] = 'cubic-bezier(0.250, 0.460, 0.450, 0.940)';
            rippleStyle['-moz-transition-timing-function'] = 'cubic-bezier(0.250, 0.460, 0.450, 0.940)';
            rippleStyle['-o-transition-timing-function'] = 'cubic-bezier(0.250, 0.460, 0.450, 0.940)';
            rippleStyle['transition-timing-function'] = 'cubic-bezier(0.250, 0.460, 0.450, 0.940)';

            ripple.setAttribute('style', convertStyle(rippleStyle));
        },

        // Fade out the most recent ripple of the element (then remove it).
        hide: function(e) {
            TouchHandler.touchup(e);

            var el = this;
            var width = el.clientWidth * 1.4;  // NOTE(review): unused local

            // Get first ripple (last element = most recently created)
            var ripple = null;
            var ripples = el.getElementsByClassName('waves-ripple');
            if (ripples.length > 0) {
                ripple = ripples[ripples.length - 1];
            } else {
                return false;
            }

            var relativeX = ripple.getAttribute('data-x');
            var relativeY = ripple.getAttribute('data-y');
            var scale = ripple.getAttribute('data-scale');

            // Get delay between mousedown and mouse leave; keep the ripple
            // visible for at least 350ms total before fading it.
            var diff = Date.now() - Number(ripple.getAttribute('data-hold'));
            var delay = 350 - diff;

            if (delay < 0) {
                delay = 0;
            }

            // Fade out ripple after delay
            setTimeout(function() {
                var style = {
                    'top': relativeY+'px',
                    'left': relativeX+'px',
                    'opacity': '0',

                    // Duration
                    '-webkit-transition-duration': Effect.duration + 'ms',
                    '-moz-transition-duration': Effect.duration + 'ms',
                    '-o-transition-duration': Effect.duration + 'ms',
                    'transition-duration': Effect.duration + 'ms',
                    '-webkit-transform': scale,
                    '-moz-transform': scale,
                    '-ms-transform': scale,
                    '-o-transform': scale,
                    'transform': scale,
                };

                ripple.setAttribute('style', convertStyle(style));

                // Remove the node once the fade transition has finished;
                // swallow errors if it was already detached.
                setTimeout(function() {
                    try {
                        el.removeChild(ripple);
                    } catch(e) {
                        return false;
                    }
                }, Effect.duration);
            }, delay);
        },

        // Little hack to make <input> can perform waves effect:
        // wrap each <input> in an <i class="waves-input-wrapper"> carrying
        // the input's original classes and inline style.
        wrapInput: function(elements) {
            for (var a = 0; a < elements.length; a++) {
                var el = elements[a];

                if (el.tagName.toLowerCase() === 'input') {
                    var parent = el.parentNode;

                    // If input already have parent just pass through
                    if (parent.tagName.toLowerCase() === 'i' && parent.className.indexOf('waves-effect') !== -1) {
                        continue;
                    }

                    // Put element class and style to the specified parent
                    var wrapper = document.createElement('i');
                    wrapper.className = el.className + ' waves-input-wrapper';

                    var elementStyle = el.getAttribute('style');

                    if (!elementStyle) {
                        elementStyle = '';
                    }

                    wrapper.setAttribute('style', elementStyle);

                    el.className = 'waves-button-input';
                    el.removeAttribute('style');

                    // Put element as child
                    parent.replaceChild(wrapper, el);
                    wrapper.appendChild(el);
                }
            }
        }
    };

    /**
     * Disable mousedown event for 500ms during and after touch
     */
    var TouchHandler = {
        /* uses an integer rather than bool so there's no issues with
         * needing to clear timeouts if another touch event occurred
         * within the 500ms. Cannot mouseup between touchstart and
         * touchend, nor in the 500ms after touchend. */
        touches: 0,
        allowEvent: function(e) {
            var allow = true;
            if (e.type === 'touchstart') {
                TouchHandler.touches += 1; //push
            } else if (e.type === 'touchend' || e.type === 'touchcancel') {
                setTimeout(function() {
                    if (TouchHandler.touches > 0) {
                        TouchHandler.touches -= 1; //pop after 500ms
                    }
                }, 500);
            } else if (e.type === 'mousedown' && TouchHandler.touches > 0) {
                allow = false;
            }
            return allow;
        },
        touchup: function(e) {
            TouchHandler.allowEvent(e);
        }
    };

    /**
     * Delegated click handler for .waves-effect element.
     * returns null when .waves-effect element not in "click tree"
     */
    function getWavesEffectElement(e) {
        if (TouchHandler.allowEvent(e) === false) {
            return null;
        }
        var element = null;
        var target = e.target || e.srcElement;
        // Walk up from the event target looking for a .waves-effect ancestor
        // (SVG elements are skipped: their className is not a string).
        while (target.parentNode !== null) {
            if (!(target instanceof SVGElement) && target.className.indexOf('waves-effect') !== -1) {
                element = target;
                break;
            }
            target = target.parentNode;
        }
        return element;
    }

    /**
     * Bubble the click and show effect if .waves-effect elem was found
     */
    function showEffect(e) {
        var element = getWavesEffectElement(e);
        if (element !== null) {
            Effect.show(e, element);
            if ('ontouchstart' in window) {
                element.addEventListener('touchend', Effect.hide, false);
                element.addEventListener('touchcancel', Effect.hide, false);
            }
            element.addEventListener('mouseup', Effect.hide, false);
            element.addEventListener('mouseleave', Effect.hide, false);
            element.addEventListener('dragend', Effect.hide, false);
        }
    }

    // Public entry point: wrap inputs and install the delegated handlers
    // on document.body. options.duration overrides the ripple duration.
    Waves.displayEffect = function(options) {
        options = options || {};

        if ('duration' in options) {
            Effect.duration = options.duration;
        }

        //Wrap input inside <i> tag
        Effect.wrapInput($$('.waves-effect'));

        if ('ontouchstart' in window) {
            document.body.addEventListener('touchstart', showEffect, false);
        }
        document.body.addEventListener('mousedown', showEffect, false);
    };

    /**
     * Attach Waves to an input element (or any element which doesn't
     * bubble mouseup/mousedown events).
     * Intended to be used with dynamically loaded forms/inputs, or
     * where the user doesn't want a delegated click handler.
     */
    Waves.attach = function(element) {
        //FUTURE: automatically add waves classes and allow users
        // to specify them with an options param? Eg. light/classic/button
        if (element.tagName.toLowerCase() === 'input') {
            Effect.wrapInput([element]);
            element = element.parentNode;
        }
        if ('ontouchstart' in window) {
            element.addEventListener('touchstart', showEffect, false);
        }
        element.addEventListener('mousedown', showEffect, false);
    };

    window.Waves = Waves;

    // Auto-initialize with defaults once the DOM is ready.
    document.addEventListener('DOMContentLoaded', function() {
        Waves.displayEffect();
    }, false);

})(window);
PypiClean
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/minizlib/constants.js
// Constant table for minizlib. On very old Node versions
// require('zlib').constants does not exist, so a minimal stub is used as
// the base; any constants the running Node actually exports take
// precedence because realZlibConstants is assigned last.
const realZlibConstants = require('zlib').constants ||
  /* istanbul ignore next */ { ZLIB_VERNUM: 4736 }

module.exports = Object.freeze(Object.assign(Object.create(null), {
  // flush values
  Z_NO_FLUSH: 0,
  Z_PARTIAL_FLUSH: 1,
  Z_SYNC_FLUSH: 2,
  Z_FULL_FLUSH: 3,
  Z_FINISH: 4,
  Z_BLOCK: 5,
  // return / error codes
  Z_OK: 0,
  Z_STREAM_END: 1,
  Z_NEED_DICT: 2,
  Z_ERRNO: -1,
  Z_STREAM_ERROR: -2,
  Z_DATA_ERROR: -3,
  Z_MEM_ERROR: -4,
  Z_BUF_ERROR: -5,
  Z_VERSION_ERROR: -6,
  // compression levels
  Z_NO_COMPRESSION: 0,
  Z_BEST_SPEED: 1,
  Z_BEST_COMPRESSION: 9,
  Z_DEFAULT_COMPRESSION: -1,
  // compression strategies
  Z_FILTERED: 1,
  Z_HUFFMAN_ONLY: 2,
  Z_RLE: 3,
  Z_FIXED: 4,
  Z_DEFAULT_STRATEGY: 0,
  // stream/codec modes
  DEFLATE: 1,
  INFLATE: 2,
  GZIP: 3,
  GUNZIP: 4,
  DEFLATERAW: 5,
  INFLATERAW: 6,
  UNZIP: 7,
  BROTLI_DECODE: 8,
  BROTLI_ENCODE: 9,
  // window, chunk and memory-level bounds
  Z_MIN_WINDOWBITS: 8,
  Z_MAX_WINDOWBITS: 15,
  Z_DEFAULT_WINDOWBITS: 15,
  Z_MIN_CHUNK: 64,
  Z_MAX_CHUNK: Infinity,
  Z_DEFAULT_CHUNK: 16384,
  Z_MIN_MEMLEVEL: 1,
  Z_MAX_MEMLEVEL: 9,
  Z_DEFAULT_MEMLEVEL: 8,
  Z_MIN_LEVEL: -1,
  Z_MAX_LEVEL: 9,
  Z_DEFAULT_LEVEL: -1,
  // brotli operations
  BROTLI_OPERATION_PROCESS: 0,
  BROTLI_OPERATION_FLUSH: 1,
  BROTLI_OPERATION_FINISH: 2,
  BROTLI_OPERATION_EMIT_METADATA: 3,
  // brotli modes
  BROTLI_MODE_GENERIC: 0,
  BROTLI_MODE_TEXT: 1,
  BROTLI_MODE_FONT: 2,
  BROTLI_DEFAULT_MODE: 0,
  // brotli quality / window / block bounds
  BROTLI_MIN_QUALITY: 0,
  BROTLI_MAX_QUALITY: 11,
  BROTLI_DEFAULT_QUALITY: 11,
  BROTLI_MIN_WINDOW_BITS: 10,
  BROTLI_MAX_WINDOW_BITS: 24,
  BROTLI_LARGE_MAX_WINDOW_BITS: 30,
  BROTLI_DEFAULT_WINDOW: 22,
  BROTLI_MIN_INPUT_BLOCK_BITS: 16,
  BROTLI_MAX_INPUT_BLOCK_BITS: 24,
  // brotli encoder parameters
  BROTLI_PARAM_MODE: 0,
  BROTLI_PARAM_QUALITY: 1,
  BROTLI_PARAM_LGWIN: 2,
  BROTLI_PARAM_LGBLOCK: 3,
  BROTLI_PARAM_DISABLE_LITERAL_CONTEXT_MODELING: 4,
  BROTLI_PARAM_SIZE_HINT: 5,
  BROTLI_PARAM_LARGE_WINDOW: 6,
  BROTLI_PARAM_NPOSTFIX: 7,
  BROTLI_PARAM_NDIRECT: 8,
  // brotli decoder results and parameters
  BROTLI_DECODER_RESULT_ERROR: 0,
  BROTLI_DECODER_RESULT_SUCCESS: 1,
  BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT: 2,
  BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT: 3,
  BROTLI_DECODER_PARAM_DISABLE_RING_BUFFER_REALLOCATION: 0,
  BROTLI_DECODER_PARAM_LARGE_WINDOW: 1,
  // brotli decoder status / error codes
  BROTLI_DECODER_NO_ERROR: 0,
  BROTLI_DECODER_SUCCESS: 1,
  BROTLI_DECODER_NEEDS_MORE_INPUT: 2,
  BROTLI_DECODER_NEEDS_MORE_OUTPUT: 3,
  BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_NIBBLE: -1,
  BROTLI_DECODER_ERROR_FORMAT_RESERVED: -2,
  BROTLI_DECODER_ERROR_FORMAT_EXUBERANT_META_NIBBLE: -3,
  BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_ALPHABET: -4,
  BROTLI_DECODER_ERROR_FORMAT_SIMPLE_HUFFMAN_SAME: -5,
  BROTLI_DECODER_ERROR_FORMAT_CL_SPACE: -6,
  BROTLI_DECODER_ERROR_FORMAT_HUFFMAN_SPACE: -7,
  BROTLI_DECODER_ERROR_FORMAT_CONTEXT_MAP_REPEAT: -8,
  BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_1: -9,
  BROTLI_DECODER_ERROR_FORMAT_BLOCK_LENGTH_2: -10,
  BROTLI_DECODER_ERROR_FORMAT_TRANSFORM: -11,
  BROTLI_DECODER_ERROR_FORMAT_DICTIONARY: -12,
  BROTLI_DECODER_ERROR_FORMAT_WINDOW_BITS: -13,
  BROTLI_DECODER_ERROR_FORMAT_PADDING_1: -14,
  BROTLI_DECODER_ERROR_FORMAT_PADDING_2: -15,
  BROTLI_DECODER_ERROR_FORMAT_DISTANCE: -16,
  BROTLI_DECODER_ERROR_DICTIONARY_NOT_SET: -19,
  BROTLI_DECODER_ERROR_INVALID_ARGUMENTS: -20,
  BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MODES: -21,
  BROTLI_DECODER_ERROR_ALLOC_TREE_GROUPS: -22,
  BROTLI_DECODER_ERROR_ALLOC_CONTEXT_MAP: -25,
  BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_1: -26,
  BROTLI_DECODER_ERROR_ALLOC_RING_BUFFER_2: -27,
  BROTLI_DECODER_ERROR_ALLOC_BLOCK_TYPE_TREES: -30,
  BROTLI_DECODER_ERROR_UNREACHABLE: -31,
}, realZlibConstants))
PypiClean
/KD_Lib-0.0.32.tar.gz/KD_Lib-0.0.32/docs/usage/tutorials/VirtualTeacher.rst
===========================================
Virtual Teacher using KD_Lib
===========================================

`Paper <https://arxiv.org/abs/1909.11723>`_

* A teacher is designed with 100% accuracy using label smoothing regularization
* The teacher model outputs a distribution over the classes as follows -

.. image:: ../../assets/VT.png
   :width: 400

where K is the total number of classes, c is the correct label and a is the
probability assigned to the correct class.

To use the virtual teacher algorithm with the correct class assigned a
probability of 0.9 -

.. code-block:: python

    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torchvision import datasets, transforms
    from KD_Lib.KD import VirtualTeacher

    # Define datasets and dataloaders
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "mnist_data",
            train=True,
            download=True,
            transform=transforms.Compose(
                [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
            ),
        ),
        batch_size=32,
        shuffle=True,
    )

    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "mnist_data",
            train=False,
            transform=transforms.Compose(
                [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
            ),
        ),
        batch_size=32,
        shuffle=True,
    )

    # Set device to be trained on
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Define the student model (the virtual teacher needs no teacher network)
    student_model = <your model>

    # Define optimizer
    student_optimizer = optim.SGD(student_model.parameters(), lr=0.01)

    # Train using KD_Lib
    distiller = VirtualTeacher(student_model, train_loader, test_loader,
                               student_optimizer, correct_prob=0.9, device=device)
    distiller.train_student(epochs=5)  # Train the student model
    distiller.evaluate()               # Evaluate the student model
PypiClean
/LiDAR_Lite-0.1.0-py3-none-any.whl/LiDAR_Lite.py
try:
    # smbus2 talks to the Linux I2C bus and is only installable on hardware
    # platforms (e.g. a Raspberry Pi). Deferring the failure to connect()
    # keeps this module importable (and unit-testable) on any machine.
    import smbus2 as smbus
except ImportError:
    smbus = None


class LiDAR_Lite():
    """Driver for the Garmin LiDAR-Lite rangefinder over I2C."""

    def __init__(self, addr=0x62):
        """Creates the LiDAR_Lite object

        Args:
            addr (int, optional): the I2C address of the LiDAR-Lite
        """
        # Register map: symbolic names -> device register addresses.
        self.reg = {
            "ACQ_COMMAND": 0x00,       # Device command
            "STATUS": 0x01,            # System status
            "SIG_COUNT_VAL": 0x02,     # Maximum acquisition count
            "ACQ_CONFIG_REG": 0x04,    # Acquisition mode control
            "VELOCITY": 0x09,          # Velocity measurement output
            "PEAK_CORR": 0x0c,         # Peak value in correlation record
            "NOISE_PEAK": 0x0d,        # Correlation record noise floor
            "SIGNAL_STRENGTH": 0x0e,   # Received signal strength
            "FULL_DELAY_HIGH": 0x0f,   # Distance measurement high byte
            "FULL_DELAY_LOW": 0x10,    # Distance measurement low byte
            "OUTER_LOOP_COUNT": 0x11,  # Burst measurement count control
            "REF_COUNT_VAL": 0x12,     # Reference acquisition count
            "LAST_DELAY_HIGH": 0x14,   # Previous distance measurement high byte
            "LAST_DELAY_LOW": 0x15,    # Previous distance measurement low byte
            "UNIT_ID_HIGH": 0x16,      # Serial number high byte
            "UNIT_ID_LOW": 0x17,       # Serial number low byte
            "I2C_ID_HIGH": 0x18,       # Write serial number high byte for I2C address unlock
            "I2C_ID_LOW": 0x19,        # Write serial number low byte for I2C address unlock
            "I2C_SEC_ADDR": 0x1a,      # Write new I2C address after unlock
            "THRESHOLD_BYPASS": 0x1c,  # Peak detection threshold bypass
            "I2C_CONFIG": 0x1e,        # Default address response control
            "COMMAND": 0x40,           # State command
            "MEASURE_DELAY": 0x45,     # Delay between automatic measurements
            "PEAK_BCK": 0x4c,          # Second largest peak value in correlation record
            "CORR_DATA": 0x52,         # Correlation record data low byte
            "CORR_DATA_SIGN": 0x53,    # Correlation record data high byte
            "ACQ_SETTINGS": 0x5d,      # Correlation record memory bank select
            "POWER_CONTROL": 0x65,     # Power state control
            # Aliases used by the 16-bit read/write helpers (high byte first).
            "FULL_DELAY": 0x0f,
            "LAST_DELAY": 0x14,
            "UNIT_ID": 0x16,
            "I2C_ID": 0x18,
        }
        self.addr = addr
        self.count = 0

    def connect(self, bus):
        """Connects the internal SMBus instance to an I2C bus

        Args:
            bus (int): I2C bus number (i.e. 1 corresponds to /dev/i2c-1)

        Raises:
            ImportError: if the smbus2 package is not installed
        """
        if smbus is None:
            raise ImportError("smbus2 is required to communicate with the I2C bus")
        self.bus = smbus.SMBus(bus)

    def wait_until_not_busy(self):
        """Polls the STATUS register until the LiDAR-Lite is ready
        for a new command (busy bit 0 cleared).
        """
        status = 1
        while status != 0:
            status = self.read_reg("STATUS") & 0b1

    def get_distance(self):
        """Triggers a measurement and gets the current LiDAR distance.

        Returns:
            int: distance (in cm)
        """
        # 0x04: take distance measurement with receiver bias correction.
        self.write_reg("ACQ_COMMAND", 0x04)
        self.wait_until_not_busy()
        return self.read_reg2("FULL_DELAY")

    def set_maximum_acquisition_count(self, mac=0x80):
        """Limits the number of acquisitions integrated to find a
        correlation-record peak. Controls the minimum measurement rate and
        maximum range: roughly rate = 1/n and range = n^(1/4) for n
        acquisitions.

        Args:
            mac (int, optional): maximum acquisition count
        """
        self.write_reg("SIG_COUNT_VAL", mac)

    def set_measurement_quick_termination_detection(self, mqtd=False):
        """If set, the device terminates a measurement early when it
        anticipates the correlation peak reaching maximum value: faster but
        slightly less accurate at strong signal strengths.

        Args:
            mqtd (bool, optional): measurement quick termination detection
        """
        # Bit 3 of ACQ_CONFIG_REG *disables* quick termination, hence the
        # inversion.
        self.set_ACR_bit(3, not mqtd)

    def set_detection_sensitivity(self, ds=0x00):
        """Overrides the default valid-measurement detection algorithm with a
        simple threshold criterion when set non-zero. Recommended values:
        0x20 (higher sensitivity, more erroneous measurements) or 0x60
        (reduced sensitivity, fewer erroneous measurements).

        Args:
            ds (int, optional): detection sensitivity
        """
        self.write_reg("THRESHOLD_BYPASS", ds)

    def set_bm_repetition_count(self, rc=0x00):
        """Controls how many times the device retriggers itself.

        Values 0x00/0x01 give the default one measurement per command,
        0x02-0xfe set the repetition count directly, and 0xff enables free
        running mode after the first measurement command.

        Args:
            rc (int, optional): repetition count

        Raises:
            ValueError: if the repetition count is out of range
        """
        if rc < 0x00 or rc > 0xff:
            raise ValueError("Repetition count out of range 0x00 - 0xff")
        self.write_reg("OUTER_LOOP_COUNT", rc)

    def set_bm_delay(self, delay=0x14):
        """Sets the delay between automatic (burst-mode) measurements.
        0xc8 corresponds to ~10 Hz; 0x14 roughly corresponds to ~100 Hz.

        Args:
            delay (int, optional): delay between automatic measurements
        """
        # Bit 5 of ACQ_CONFIG_REG selects the custom MEASURE_DELAY value.
        self.set_ACR_bit(5, True)
        self.write_reg("MEASURE_DELAY", delay)

    def reset_bm_delay(self):
        """Restores the default 10 Hz delay between automatic measurements."""
        self.set_bm_delay()
        self.set_ACR_bit(5, False)

    def get_velocity(self):
        """Reads the velocity: the signed (2's complement, 8-bit) difference
        in cm between the current and previous distance measurement.
        Positive velocity is away from the device. With free running mode at
        the default 10 Hz this reads in 0.1 m/s units.

        Returns:
            int: velocity
        """
        vel = self.read_reg("VELOCITY")
        # BUG FIX: `signed` is a method of this class; the original bare
        # call `signed(vel)` raised NameError at runtime.
        return self.signed(vel)

    def change_I2C_address(self, addr=0x62):
        """Changes the device's I2C address. Valid addresses are 7-bit
        values with a '0' in the least significant bit (even hex numbers).

        Args:
            addr (int, optional): new I2C address

        Raises:
            ValueError: if I2C address is invalid
        """
        if addr & 0b1 != 0:
            raise ValueError("Least significant bit is not 0")
        if addr >> 7 != 0:
            raise ValueError("Address is greater than 7 bits")
        # Unlock sequence: write the serial number back, then the new address.
        sn = self.read_reg2("UNIT_ID")
        self.write_reg2("I2C_ID", sn)
        self.write_reg("I2C_SEC_ADDR", addr)
        self.addr = addr
        # 0x08: disable the default address response.
        self.write_reg("I2C_CONFIG", 0x08)

    def set_power_control(self, disable_rc=False, device_sleep=False):
        """Sets the device power state. Disabling the receiver circuit saves
        roughly 40 mA (it restabilizes before the next measurement); sleep
        mode until the next I2C transaction saves 20 mA. Both reset all
        registers.

        Args:
            disable_rc (bool, optional): disable receiver circuit
            device_sleep (bool, optional): put the device in sleep mode
        """
        pc = 0
        if disable_rc:
            pc |= 0b001
        if device_sleep:
            pc |= 0b100
        self.write_reg("POWER_CONTROL", pc)

    def set_ACR_bit(self, bit, val=True):
        """Sets the specified bit in ACQ_CONFIG_REG to the specified value.

        Args:
            bit (int): the bit to set (0-7)
            val (bool, optional): the value to set the bit to
        """
        acr = self.read_reg("ACQ_CONFIG_REG")
        if val:
            acr |= 1 << bit
        else:
            # BUG FIX: the original mask `0b1111111 - (1 << bit)` was only
            # 7 bits wide, so bit 7 could never be cleared (the mask went
            # negative and left the value unchanged). XOR against 0xFF
            # clears exactly the requested bit of the 8-bit register.
            acr &= 0xFF ^ (1 << bit)
        self.write_reg("ACQ_CONFIG_REG", acr)

    def read_reg(self, reg):
        """Reads a specified register from the LiDAR

        Args:
            reg (string): the name of the register (contained in self.reg)

        Returns:
            byte: the value of the register (8 bits)
        """
        return self.bus.read_byte_data(self.addr, self.reg[reg])

    def read_reg2(self, reg):
        """Reads 2 consecutive registers from the LiDAR (high byte first).

        Args:
            reg (string): the name of the first register (contained in self.reg)

        Returns:
            int: the combined value of both registers (16 bits)
        """
        high_byte = self.bus.read_byte_data(self.addr, self.reg[reg])
        low_byte = self.bus.read_byte_data(self.addr, self.reg[reg] + 1)
        return (high_byte << 8) + low_byte

    def write_reg(self, reg, val):
        """Writes a specified value into a specified register

        Args:
            reg (string): the name of the register (contained in self.reg)
            val (byte): the value to write to the register (8 bits)
        """
        self.bus.write_byte_data(self.addr, self.reg[reg], val)

    def write_reg2(self, reg, val):
        """Writes a 16-bit value into 2 consecutive registers (high byte
        first).

        Args:
            reg (string): the name of the first register (contained in self.reg)
            val (int): the value to write into both registers (16 bits)
        """
        self.bus.write_byte_data(self.addr, self.reg[reg], val >> 8)
        self.bus.write_byte_data(self.addr, self.reg[reg] + 1, val & 0b11111111)

    def signed(self, val):
        """Converts an 8-bit two's-complement value into a signed number.

        Args:
            val (byte): 8-bit signed value

        Returns:
            int: signed number
        """
        if val > 0b01111111:
            return (0b100000000 - val) * (-1)
        else:
            return val
PypiClean
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/cbpp.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import csv
import numpy as np
import os
import sys

from observations.util import maybe_download_and_extract


def cbpp(path):
  """Contagious bovine pleuropneumonia

  Contagious bovine pleuropneumonia (CBPP) is a major disease of cattle
  in Africa, caused by a mycoplasma. This dataset describes the
  serological incidence of CBPP in zebu cattle during a follow-up survey
  implemented in 15 commercial herds located in the Boji district of
  Ethiopia. The goal of the survey was to study the within-herd spread
  of CBPP in newly infected herds. Blood samples were quarterly
  collected from all animals of these herds to determine their CBPP
  status. These data were used to compute the serological incidence of
  CBPP (new cases occurring during a given time period). Some data are
  missing (lost to follow-up).

  A data frame with 56 observations on the following 4 variables.

  `herd`
      A factor identifying the herd (1 to 15).

  `incidence`
      The number of new serological cases for a given herd and time period.

  `size`
      A numeric vector describing herd size at the beginning of a given
      time period.

  `period`
      A factor with levels `1` to `4`.

  Lesnoff, M., Laval, G., Bonnet, P., Abdicho, S., Workalemahu, A.,
  Kifle, D., Peyraud, A., Lancelot, R., Thiaucourt, F. (2004)
  Within-herd spread of contagious bovine pleuropneumonia in Ethiopian
  highlands. *Preventive Veterinary Medicine* **64**, 27–40.

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there. Filename is `cbpp.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 56 rows and 4 columns and
    dictionary `metadata` of column headers (feature names).
  """
  # pandas is imported lazily so the observations package can be used
  # without it when this loader is not called.
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'cbpp.csv'
  # The CSV is cached on disk; only download it on first use.
  if not os.path.exists(os.path.join(path, filename)):
    url = 'http://dustintran.com/data/r/lme4/cbpp.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='cbpp.csv',
                               resume=False)

  # First CSV column is the R row index, used as the DataFrame index.
  data = pd.read_csv(os.path.join(path, filename), index_col=0,
                     parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/grid/enhanced/plugins/GridSource.js.uncompressed.js
define("dojox/grid/enhanced/plugins/GridSource", [
	"dojo/_base/declare",
	"dojo/_base/array",
	"dojo/_base/lang",
	"dojo/dnd/Source",
	"./DnD"
], function(declare, array, lang, Source, DnD){

// Concatenate an array of arrays into a single flat array.
var _joinToArray = function(arrays){
	var a = arrays[0];
	for(var i = 1; i < arrays.length; ++i){
		a = a.concat(arrays[i]);
	}
	return a;
};
var GridDnDSource = lang.getObject("dojox.grid.enhanced.plugins.GridDnDSource");

return declare("dojox.grid.enhanced.plugins.GridSource", Source, {
	// summary:
	//		A special source that can accept grid contents.
	//		Only for non-grid widgets or domNodes.
	accept: ["grid/cells", "grid/rows", "grid/cols", "text"],

	// insertNodesForGrid:
	//		If you'd like to insert some sort of nodes into your dnd source, turn this on,
	//		and override getCellContent/getRowContent/getColumnContent
	//		to populate the dnd data in your desired format.
	insertNodesForGrid: false,

	markupFactory: function(params, node){
		// BUG FIX: `cls` was assigned without `var`, leaking an implicit
		// global named `cls` every time this factory ran.
		var cls = lang.getObject("dojox.grid.enhanced.plugins.GridSource");
		return new cls(node, params);
	},
	checkAcceptance: function(source, nodes){
		// summary:
		//		Reject grid rows/cells whose items are not fully loaded yet;
		//		remember the grid's dnd plugin for the dragging callbacks.
		if(source instanceof GridDnDSource){
			if(nodes[0]){
				var item = source.getItem(nodes[0].id);
				if(item && (array.indexOf(item.type, "grid/rows") >= 0 || array.indexOf(item.type, "grid/cells") >= 0) && !source.dndPlugin._allDnDItemsLoaded()){
					return false;
				}
			}
			this.sourcePlugin = source.dndPlugin;
		}
		return this.inherited(arguments);
	},
	onDraggingOver: function(){
		if(this.sourcePlugin){
			this.sourcePlugin._isSource = true;
		}
	},
	onDraggingOut: function(){
		if(this.sourcePlugin){
			this.sourcePlugin._isSource = false;
		}
	},
	onDropExternal: function(source, nodes, copy){
		// summary:
		//		Handle a drop coming from a grid: render the dragged
		//		cells/rows/columns into the dropped node and fire the
		//		matching onDropGrid* hook.
		if(source instanceof GridDnDSource){
			var ranges = array.map(nodes, function(node){
				return source.getItem(node.id).data;
			});
			var item = source.getItem(nodes[0].id);
			var grid = item.dndPlugin.grid;
			var type = item.type[0];
			var range;
			try{
				switch(type){
					case "grid/cells":
						nodes[0].innerHTML = this.getCellContent(grid, ranges[0].min, ranges[0].max) || "";
						this.onDropGridCells(grid, ranges[0].min, ranges[0].max);
						break;
					case "grid/rows":
						range = _joinToArray(ranges);
						nodes[0].innerHTML = this.getRowContent(grid, range) || "";
						this.onDropGridRows(grid, range);
						break;
					case "grid/cols":
						range = _joinToArray(ranges);
						nodes[0].innerHTML = this.getColumnContent(grid, range) || "";
						this.onDropGridColumns(grid, range);
						break;
				}
				if(this.insertNodesForGrid){
					this.selectNone();
					this.insertNodes(true, [nodes[0]], this.before, this.current);
				}
				// Tell the grid's plugin whether this was a move (!copy).
				item.dndPlugin.onDragOut(!copy);
			}catch(e){
				console.warn("GridSource.onDropExternal() error:",e);
			}
		}else{
			this.inherited(arguments);
		}
	},
	getCellContent: function(grid, leftTopCell, rightBottomCell){
		// summary:
		//		Fill node innerHTML for dnd grid cells. Override this.
		// sample code:
		//		var cells = grid.layout.cells;
		//		var store = grid.store;
		//		var cache = grid._by_idx;
		//		var res = "Grid Cells from " + grid.id + ":<br/>";
		//		for(var r = leftTopCell.row; r <= rightBottomCell.row; ++r){
		//			for(var c = leftTopCell.col; c <= rightBottomCell.col; ++c){
		//				res += store.getValue(cache[r].item, cells[c].field) + ", ";
		//			}
		//			res = res.substring(0, res.length - 2) + ";<br/>";
		//		}
		//		return res;
	},
	getRowContent: function(grid, rowIndexes){
		// summary:
		//		Fill node innerHTML for dnd grid rows. Override this.
		// sample code:
		//		var cells = grid.layout.cells;
		//		var store = grid.store;
		//		var cache = grid._by_idx;
		//		var res = "Grid Rows from " + grid.id + ":<br/>";
		//		for(var i = 0; i < rowIndexes.length; ++i){
		//			var r = rowIndexes[i];
		//			res += "Row " + r + ": ";
		//			for(var j = 0; j < cells.length; ++j){
		//				if(!cells[j].hidden){
		//					res += store.getValue(cache[r].item, cells[j].field) + ", ";
		//				}
		//			}
		//			res = res.substring(0, res.length - 2) + ";<br/>";
		//		}
		//		return res;
	},
	getColumnContent: function(grid, colIndexes){
		// summary:
		//		Fill node innerHTML for dnd grid columns. Override this.
		// sample code:
		//		var cells = grid.layout.cells;
		//		var res = "Grid Columns from " + grid.id + ":";
		//		for(var i = 0; i < colIndexes.length; ++i){
		//			var c = colIndexes[i];
		//			res += (cells[c].name || cells[c].field) + ", ";
		//		}
		//		return res.substring(0, res.length - 2);
	},
	// Hooks fired after a successful drop of the corresponding type;
	// override in subclasses/instances as needed.
	onDropGridCells: function(grid, leftTopCell, rightBottomCell){
	},
	onDropGridRows: function(grid, rowIndexes){
	},
	onDropGridColumns: function(grid, colIndexes){
	}
});
});
PypiClean
/DFRobot_EC_PH_ADC-0.1.1.tar.gz/DFRobot_EC_PH_ADC-0.1.1/DFRobot/DFR_ADS1115.py
import smbus
import time

# Get I2C bus (bus 1 is the hardware I2C bus on Raspberry Pi boards).
bus = smbus.SMBus(1)

# I2C address of the device
ADS1115_IIC_ADDRESS0 = 0x48
ADS1115_IIC_ADDRESS1 = 0x49

# ADS1115 Register Map
ADS1115_REG_POINTER_CONVERT = 0x00    # Conversion register
ADS1115_REG_POINTER_CONFIG = 0x01     # Configuration register
ADS1115_REG_POINTER_LOWTHRESH = 0x02  # Lo_thresh register
ADS1115_REG_POINTER_HITHRESH = 0x03   # Hi_thresh register

# ADS1115 Configuration Register
ADS1115_REG_CONFIG_OS_NOEFFECT = 0x00   # No effect
ADS1115_REG_CONFIG_OS_SINGLE = 0x80     # Begin a single conversion
ADS1115_REG_CONFIG_MUX_DIFF_0_1 = 0x00  # Differential P = AIN0, N = AIN1 (default)
ADS1115_REG_CONFIG_MUX_DIFF_0_3 = 0x10  # Differential P = AIN0, N = AIN3
ADS1115_REG_CONFIG_MUX_DIFF_1_3 = 0x20  # Differential P = AIN1, N = AIN3
ADS1115_REG_CONFIG_MUX_DIFF_2_3 = 0x30  # Differential P = AIN2, N = AIN3
ADS1115_REG_CONFIG_MUX_SINGLE_0 = 0x40  # Single-ended P = AIN0, N = GND
ADS1115_REG_CONFIG_MUX_SINGLE_1 = 0x50  # Single-ended P = AIN1, N = GND
ADS1115_REG_CONFIG_MUX_SINGLE_2 = 0x60  # Single-ended P = AIN2, N = GND
ADS1115_REG_CONFIG_MUX_SINGLE_3 = 0x70  # Single-ended P = AIN3, N = GND
ADS1115_REG_CONFIG_PGA_6_144V = 0x00    # +/-6.144V range = Gain 2/3
ADS1115_REG_CONFIG_PGA_4_096V = 0x02    # +/-4.096V range = Gain 1
ADS1115_REG_CONFIG_PGA_2_048V = 0x04    # +/-2.048V range = Gain 2 (default)
ADS1115_REG_CONFIG_PGA_1_024V = 0x06    # +/-1.024V range = Gain 4
ADS1115_REG_CONFIG_PGA_0_512V = 0x08    # +/-0.512V range = Gain 8
ADS1115_REG_CONFIG_PGA_0_256V = 0x0A    # +/-0.256V range = Gain 16
ADS1115_REG_CONFIG_MODE_CONTIN = 0x00   # Continuous conversion mode
ADS1115_REG_CONFIG_MODE_SINGLE = 0x01   # Power-down single-shot mode (default)
ADS1115_REG_CONFIG_DR_8SPS = 0x00       # 8 samples per second
ADS1115_REG_CONFIG_DR_16SPS = 0x20      # 16 samples per second
ADS1115_REG_CONFIG_DR_32SPS = 0x40      # 32 samples per second
ADS1115_REG_CONFIG_DR_64SPS = 0x60      # 64 samples per second
ADS1115_REG_CONFIG_DR_128SPS = 0x80     # 128 samples per second (default)
ADS1115_REG_CONFIG_DR_250SPS = 0xA0     # 250 samples per second
ADS1115_REG_CONFIG_DR_475SPS = 0xC0     # 475 samples per second
ADS1115_REG_CONFIG_DR_860SPS = 0xE0     # 860 samples per second
ADS1115_REG_CONFIG_CMODE_TRAD = 0x00    # Traditional comparator with hysteresis (default)
ADS1115_REG_CONFIG_CMODE_WINDOW = 0x10  # Window comparator
ADS1115_REG_CONFIG_CPOL_ACTVLOW = 0x00  # ALERT/RDY pin is low when active (default)
ADS1115_REG_CONFIG_CPOL_ACTVHI = 0x08   # ALERT/RDY pin is high when active
ADS1115_REG_CONFIG_CLAT_NONLAT = 0x00   # Non-latching comparator (default)
ADS1115_REG_CONFIG_CLAT_LATCH = 0x04    # Latching comparator
ADS1115_REG_CONFIG_CQUE_1CONV = 0x00    # Assert ALERT/RDY after one conversions
ADS1115_REG_CONFIG_CQUE_2CONV = 0x01    # Assert ALERT/RDY after two conversions
ADS1115_REG_CONFIG_CQUE_4CONV = 0x02    # Assert ALERT/RDY after four conversions
ADS1115_REG_CONFIG_CQUE_NONE = 0x03     # Disable the comparator and put ALERT/RDY in high state (default)

# Module-level state shared by all ADS1115 instances. Kept module-global (not
# per-instance) for backward compatibility with the original API.
mygain = 0x02        # Default PGA setting: +/-4.096V (ADS1115_REG_CONFIG_PGA_4_096V)
coefficient = 0.125  # mV per LSB for the default PGA setting
addr_G = ADS1115_IIC_ADDRESS0


class ADS1115():
    """Minimal driver for the TI ADS1115 16-bit I2C ADC."""

    # Maps each PGA configuration value to its mV-per-LSB scale factor.
    # Unknown gains fall back to 0.125 (the +/-4.096V default), matching the
    # original if/elif chain's behavior.
    _COEFFICIENTS = {
        ADS1115_REG_CONFIG_PGA_6_144V: 0.1875,
        ADS1115_REG_CONFIG_PGA_4_096V: 0.125,
        ADS1115_REG_CONFIG_PGA_2_048V: 0.0625,
        ADS1115_REG_CONFIG_PGA_1_024V: 0.03125,
        ADS1115_REG_CONFIG_PGA_0_512V: 0.015625,
        ADS1115_REG_CONFIG_PGA_0_256V: 0.0078125,
    }

    def setGain(self, gain):
        """Set the PGA range (one of the ADS1115_REG_CONFIG_PGA_* constants)
        and the matching mV/LSB conversion coefficient."""
        global mygain
        global coefficient
        mygain = gain
        coefficient = self._COEFFICIENTS.get(mygain, 0.125)

    def setAddr_ADS1115(self, addr):
        """Select which I2C device address subsequent transfers use."""
        global addr_G
        addr_G = addr

    def setChannel(self, channel):
        """Select the Channel user want to use from 0-3
        For Single-ended Output
        0 : AINP = AIN0 and AINN = GND
        1 : AINP = AIN1 and AINN = GND
        2 : AINP = AIN2 and AINN = GND
        3 : AINP = AIN3 and AINN = GND
        For Differential Output
        0 : AINP = AIN0 and AINN = AIN1
        1 : AINP = AIN0 and AINN = AIN3
        2 : AINP = AIN1 and AINN = AIN3
        3 : AINP = AIN2 and AINN = AIN3"""
        self.channel = channel
        if self.channel > 3:
            # Out-of-range channels fall back to channel 0, preserving the
            # original behavior. NOTE(review): negative channels are not
            # clamped, same as the original code — confirm if that matters.
            self.channel = 0
        return self.channel

    def setSingle(self):
        """Write a single-ended conversion configuration for the channel
        previously chosen via setChannel()."""
        mux = (ADS1115_REG_CONFIG_MUX_SINGLE_0,
               ADS1115_REG_CONFIG_MUX_SINGLE_1,
               ADS1115_REG_CONFIG_MUX_SINGLE_2,
               ADS1115_REG_CONFIG_MUX_SINGLE_3)[self.channel]
        CONFIG_REG = [ADS1115_REG_CONFIG_OS_SINGLE | mux | mygain |
                      ADS1115_REG_CONFIG_MODE_CONTIN,
                      ADS1115_REG_CONFIG_DR_128SPS |
                      ADS1115_REG_CONFIG_CQUE_NONE]
        bus.write_i2c_block_data(addr_G, ADS1115_REG_POINTER_CONFIG, CONFIG_REG)

    def setDifferential(self):
        """Write a differential conversion configuration for the channel pair
        previously chosen via setChannel()."""
        mux = (ADS1115_REG_CONFIG_MUX_DIFF_0_1,
               ADS1115_REG_CONFIG_MUX_DIFF_0_3,
               ADS1115_REG_CONFIG_MUX_DIFF_1_3,
               ADS1115_REG_CONFIG_MUX_DIFF_2_3)[self.channel]
        CONFIG_REG = [ADS1115_REG_CONFIG_OS_SINGLE | mux | mygain |
                      ADS1115_REG_CONFIG_MODE_CONTIN,
                      ADS1115_REG_CONFIG_DR_128SPS |
                      ADS1115_REG_CONFIG_CQUE_NONE]
        bus.write_i2c_block_data(addr_G, ADS1115_REG_POINTER_CONFIG, CONFIG_REG)

    def readValue(self):
        """Read data back from ADS1115_REG_POINTER_CONVERT(0x00), 2 bytes
        raw_adc MSB, raw_adc LSB.

        Returns:
            dict: {'r': value} where value is the scaled reading (in mV,
            given the coefficient table above) truncated to int.
        """
        global coefficient
        global addr_G
        data = bus.read_i2c_block_data(addr_G, ADS1115_REG_POINTER_CONVERT, 2)

        # The conversion register holds a 16-bit big-endian two's-complement
        # sample.
        raw_adc = data[0] * 256 + data[1]
        if raw_adc > 32767:
            # BUG FIX: two's-complement sign conversion must subtract 65536,
            # not 65535 — the original off-by-one shifted every negative
            # reading by one LSB.
            raw_adc -= 65536
        raw_adc = int(float(raw_adc) * coefficient)
        return {'r': raw_adc}

    def readVoltage(self, channel):
        """Configure a single-ended read on `channel`, wait for the converter
        to settle, and return the scaled reading (see readValue)."""
        self.setChannel(channel)
        self.setSingle()
        time.sleep(0.1)
        return self.readValue()

    def ComparatorVoltage(self, channel):
        """Configure a differential read on `channel`, wait for the converter
        to settle, and return the scaled reading (see readValue)."""
        self.setChannel(channel)
        self.setDifferential()
        time.sleep(0.1)
        return self.readValue()
PypiClean
/EasyAdls-0.1.3.tar.gz/EasyAdls-0.1.3/README.md
## EasyAdls Wrapper around the Azure Storage Blobs SDK to make life a bit easier. ### Install `pip install EasyAdls` ### Examples Get a client with either a key or sas token: ``` from EasyAdls import EasyBlob client = EasyBlob(account_name='mystorageaccount', container='some-container', credential='key_or_sas_token') ``` Retrieve properties of a blob: ``` # return properties client.get_properties('blob.jpg') ``` Copy a blob. You can specify another container if needed, default is same container: ``` # copy a blob client.copy_blob('blob.jpg', 'copy_of_blob.jpg') client.copy_blob(source_path='blob.jpg', destination_path='copy_of_blob.jpg', destination_container='anothercontainer') ``` Move-, or rename a blob. You can specify another container if needed, default is same container ``` client.move_or_rename_blob('blob.jpg', 'renamed_blob.jpg') client.move_or_rename_blob(source_path='/path/to/blob.jpg', destination_path='/another/path/to/blob.jpg', destination_container='anothercontainer') ``` Read / write a csv directly into a pandas dataframe. 
You can pass-down all arguments of `pandas.read_csv()` and `pandas.to_csv()`: ``` # read csv df = client.read_csv_to_pandas('some.csv', header=None, sep=',') # write csv client.write_pandas_to_csv(df, 'another.csv', overwrite=False, index=True) ``` Get a string or a bytestring back from a blob: ``` # returns string client.read_blob_to_string('some.csv') # returns bytes client.read_blob_to_bytes('blob.jpg') ``` Write any content into a blob, can be both string or bytestring: ``` # directly write to file client.write_content_to_blob('some.txt', 'some random test string', overwrite=True) ``` Read a text blob into a StringIO object, so you can read it in with e.g., Pandas as if it was on disk: ``` # get StringIO csv_as_string = client.read_textfile_to_io('some.csv') # turn it into a pandas df pd.read_csv(csv_as_string) ``` Read a (binary) blob into a BytesIO object, so you can read it in with e.g., Pandas as if it was on disk ``` # get BytesIO csv_as_bytes = client.read_binary_to_io('some.csv') # turn it into a pandas df pd.read_csv(csv_as_bytes) ``` Upload a local file to blob or vice versa ``` # upload a file client.upload_blob('./some_local.jpg', 'blob.jpg', overwrite=True) # download a file client.download_blob('blob.jpg', './some_local.jpg') ``` ### License None whatsoever ### Author D. Koops
PypiClean
/Newgram-0.0.5.tar.gz/Newgram-0.0.5/newgram/methods/utilities/run.py
import asyncio
import inspect

import newgram
from newgram.methods.utilities.idle import idle


class Run:
    def run(
        self: "newgram.Client",
        coroutine=None
    ):
        """Start the client, idle the main script and finally stop the client.

        Called with no argument, this is a convenience wrapper that runs
        :meth:`~newgram.Client.start`, :meth:`~newgram.idle` and
        :meth:`~newgram.Client.stop` in sequence, so a single-client script
        stays short. Called with a coroutine, it simply drives that coroutine
        to completion on the current event loop (much like
        :py:obj:`asyncio.run`, but reusing the existing loop) and performs no
        client operation. For several clients at once, see
        :meth:`newgram.compose`.

        Parameters:
            coroutine (``Coroutine``, *optional*):
                Pass a coroutine to run it until it completes.

        Raises:
            ConnectionError: In case you try to run an already started client.

        Example:
            .. code-block:: python

                from newgram import Client

                app = Client("my_account")
                ...  # Set handlers up
                app.run()

            .. code-block:: python

                from newgram import Client

                app = Client("my_account")

                async def main():
                    async with app:
                        print(await app.get_me())

                app.run(main())
        """
        loop = asyncio.get_event_loop()

        # Caller supplied their own coroutine: drive it and do nothing else.
        if coroutine is not None:
            loop.run_until_complete(coroutine)
            return

        # Otherwise run the full start -> idle -> stop lifecycle. start/stop
        # may be either coroutine functions or plain callables depending on
        # the client implementation.
        if inspect.iscoroutinefunction(self.start):
            loop.run_until_complete(self.start())
            loop.run_until_complete(idle())
            loop.run_until_complete(self.stop())
        else:
            self.start()
            loop.run_until_complete(idle())
            self.stop()
PypiClean
/MDSuite-0.2.0-py3-none-any.whl/mdsuite/time_series/base.py
from __future__ import annotations

from typing import TYPE_CHECKING

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

from mdsuite.database.simulation_database import Database

if TYPE_CHECKING:
    from mdsuite import Experiment


def running_mean(x, N):
    """Perform a rolling window mean.

    Uses the cumulative-sum trick: prepend a 0, take the cumsum, and the
    difference of entries N apart is the sum of each length-N window.
    Returns an array shorter than ``x`` by ``N - 1`` elements.
    """
    cumsum = np.cumsum(np.insert(x, 0, 0))
    return (cumsum[N:] - cumsum[:-N]) / float(N)


class TimeSeries:
    """Base class for plotting a property of an experiment over time.

    Subclasses are expected to set ``loaded_property`` and ``fig_labels``
    (both are None here) before calling/plotting.
    """

    def __init__(self, experiment: Experiment):
        """
        Parameters
        ----------
        experiment: Experiment
            The parent experiment class to perform the time series operation on
        """
        self.experiment = experiment
        # Name of the property to load from the database; must be set by a
        # subclass before `data` is accessed.
        self.loaded_property = None
        # Axis labels for the plot; subclasses fill these in.
        self.fig_labels = {"x": None, "y": None}
        self.species = experiment.species
        # Window size for the rolling mean; 0 disables smoothing.
        self.rolling_window = 0
        # When True, preprocess_data collapses atoms and property dimensions
        # into a single value per time step.
        self.reduce_sum = True

        # Properties (lazily populated caches backing `database` and `data`)
        self._database = None
        self._data = None

    def __call__(self, species: list = None, rolling_window: int = 0):
        # Convenience entry point: optionally narrow the species selection,
        # set the smoothing window, then plot.
        if species is not None:
            self.species = species
        self.rolling_window = rolling_window
        self.plot()

    @property
    def database(self):
        """Get the database (opened lazily on first access and cached)."""
        if self._database is None:
            self._database = Database(self.experiment.database_path / "database.hdf5")
        return self._database

    @property
    def data(self):
        """Get the data for all species and timesteps for the loaded_property.

        Loads one dataset per species and concatenates them along axis 0.
        The result is cached after the first access; changing `self.species`
        afterwards will NOT refresh it.
        """
        if self._data is None:
            self._data = tf.concat(
                [
                    self.database.load_data([f"{species}/{self.loaded_property}"])
                    for species in self.species
                ],
                axis=0,
            )
        return self._data

    @property
    def preprocess_data(self):
        """Perform some data preprocessing before plotting it.

        Optionally reduce-sums over all but the time axis, then optionally
        applies a rolling mean of width `self.rolling_window`.
        """
        data = self.data
        if self.reduce_sum:
            data = tf.einsum("atx -> t", data)
            # perform a reduce sum over atoms "a" and property dimension "x" to
            # yield time steps "t"
            # (assumes `data` is rank-3 with axes (atom, time, property) —
            # TODO confirm against the database layout)
        if self.rolling_window > 0:
            data = running_mean(data, self.rolling_window)
        return data

    def plot(self):
        """Plot the data over timesteps."""
        fig, ax = plt.subplots()
        ax.plot(self.preprocess_data)
        ax.set_xlabel(self.fig_labels["x"])
        ax.set_ylabel(self.fig_labels["y"])
        # NOTE(review): fig.show() is non-blocking and needs a GUI backend;
        # confirm this is intended over plt.show().
        fig.show()
PypiClean
/BatteryHorse-1.0.0.tar.gz/BatteryHorse-1.0.0/batteryhorse/encoder.py
import os
import sys
import argparse
from functools import lru_cache

import nltk
from nltk.corpus import wordnet

from .version import __version__

# Prefer the cryptographically secure `secrets.choice` (Python 3.6+); fall
# back to an equivalent SystemRandom-backed choice on older interpreters.
try:
    from secrets import choice
except ImportError:
    from random import SystemRandom
    choice = SystemRandom().choice


def _filter_words(word):
    # Keep only purely alphabetic words longer than one character, so the
    # word lists contain no punctuation/digit lemmas or single letters.
    return word.isalpha() and len(word) > 1


@lru_cache()
def _get_words():
    """Build the (sorted, deduplicated) verb/noun/adjective/conjunction word
    lists and their sizes, loading WordNet from the bundled nltk_data.

    The sorted ordering is load-bearing: encode_data/decode_data use list
    indices as digits, so both sides must see identical orderings.
    Cached so the WordNet scan happens only once per process.
    """
    nltk_path = os.path.join(os.path.dirname(__file__), 'nltk_data')
    if nltk_path not in nltk.data.path:
        nltk.data.path.insert(0, nltk_path)
    verbs = sorted({word.lower() for word in filter(
        _filter_words, wordnet.all_lemma_names(wordnet.VERB))})
    verb_size = len(verbs)
    nouns = sorted({word.lower() for word in filter(
        _filter_words, wordnet.all_lemma_names(wordnet.NOUN))})
    noun_size = len(nouns)
    adjs = sorted({word.lower() for word in filter(
        _filter_words, wordnet.all_lemma_names(wordnet.ADJ))})
    adj_size = len(adjs)
    conjs = sorted(['and', 'or', 'lest', 'till', 'nor', 'but', 'yet', 'so',
                    'unless', 'when'])
    conj_size = len(conjs)
    return (verbs, verb_size, nouns, noun_size, adjs, adj_size, conjs,
            conj_size)


def encode_data(data: bytes) -> str:
    """Creates a sentence encoding the hashed data given above.
    The output is one or more sentences with the format
    Verb Noun Adjective Conjunction Adjective."""
    VERBS, VERB_SIZE, NOUNS, NOUN_SIZE, ADJS, ADJ_SIZE, CONJS, CONJ_SIZE = _get_words()
    sentences = []
    sentence = []
    # Treat the input bytes as one big unsigned integer and peel off
    # mixed-radix "digits", one word-list index per word position.
    value = int.from_bytes(data, byteorder='big', signed=False)
    while value > 0:
        if len(sentence) == 0:
            # Verb
            value, offset = divmod(value, VERB_SIZE)
            sentence.append(VERBS[offset].capitalize())
        elif len(sentence) == 1:
            # Noun
            value, offset = divmod(value, NOUN_SIZE)
            sentence.append(NOUNS[offset])
        elif len(sentence) == 3:
            # Conjunction
            value, offset = divmod(value, CONJ_SIZE)
            sentence.append(CONJS[offset])
        elif len(sentence) in (2, 4):
            # Adjective
            value, offset = divmod(value, ADJ_SIZE)
            sentence.append(ADJS[offset])
        elif len(sentence) == 5:
            # Sentence break: flush the completed 5-word sentence and start a
            # new one (this iteration consumes no digits).
            sentences.append(' '.join(sentence))
            sentence = []
    # Flush the final (possibly partial, possibly empty) sentence.
    sentences.append(' '.join(sentence))
    return '. '.join(sentences).strip()


def decode_data(string: str, length: int) -> bytes:
    """Extract the hash of the encoded data from the given string of
    sentences created with encode_data."""
    VERBS, VERB_SIZE, NOUNS, NOUN_SIZE, ADJS, ADJ_SIZE, CONJS, CONJ_SIZE = _get_words()
    # Word positions in REVERSE sentence order (last word first), mirroring
    # the Verb Noun Adjective Conjunction Adjective layout of encode_data.
    parts = [
        (ADJS, ADJ_SIZE),
        (CONJS, CONJ_SIZE),
        (ADJS, ADJ_SIZE),
        (NOUNS, NOUN_SIZE),
        (VERBS, VERB_SIZE)
    ]
    sentences = string.lower().split('.')
    # Process sentences and words from last to first so digits are
    # reassembled in the same order encode_data emitted them.
    sentences.reverse()
    value = 0
    for sentence in sentences:
        words = sentence.split()
        words.reverse()
        # Start at an offset if necessary if we do not have a full sentence (ie: partial block)
        max_parts = len(parts) - len(words)
        for n, word in enumerate(words, start=max_parts):
            word = word.strip()
            dictionary, size = parts[n]
            # Recover the digit (list index) for this word position and fold
            # it back into the accumulated integer.
            index = dictionary.index(word)
            value = index + (value * size)
    return value.to_bytes(length=length, byteorder='big', signed=False)


def create_secret(size=3):
    """Creates a random sentence that can be used as a passphrase"""
    VERBS, VERB_SIZE, NOUNS, NOUN_SIZE, ADJS, ADJ_SIZE, CONJS, CONJ_SIZE = _get_words()
    words = []
    for _ in range(size):
        # `choice` is the secure variant imported above.
        words.append(choice(NOUNS + VERBS))
    return ' '.join(words).capitalize()


def main():
    """Run Batteryhorse in the terminal"""
    parser = argparse.ArgumentParser(
        prog="batteryhorse",
        description="Encode and decode data as sentences")
    parser.add_argument('--encode', action='store_true',
                        help='Accept data to be encoded from STDIN')
    parser.add_argument('--decode', action='store_true',
                        help='Accept data to be decoded from STDIN')
    parser.add_argument('--generate', action='store_true',
                        help='Generate a random secret')
    parser.add_argument(
        '--length',
        help='Specify the length of secret or data to be decoded',
        default=20,
        type=int
    )
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + __version__)
    args = parser.parse_args()
    if args.encode:
        # NOTE(review): stdin data (including any trailing newline) is encoded
        # as-is — confirm whether callers are expected to strip it.
        data = sys.stdin.read()
        print(encode_data(data.encode('ascii')))
    elif args.decode:
        data = sys.stdin.read()
        print(decode_data(data, args.length).decode('ascii'))
    elif args.generate:
        print(create_secret(args.length))
    else:
        # NOTE(review): print_usage() writes to stdout itself and returns
        # None, so this also prints a literal "None" — consider calling
        # parser.print_usage() without the outer print().
        print(parser.print_usage())


if __name__ == '__main__':
    main()
PypiClean
/HOPP-0.0.5-py3-none-any.whl/tools/optimization/optimizer/ask_tell_optimizer.py
from abc import abstractmethod
from typing import (
    Optional,
    Tuple,
    )

from ..data_logging.data_recorder import DataRecorder


class AskTellOptimizer:
    """
    An Ask-Tell structured optimizer, following the recommendations from
        Collette, Y., N. Hansen, G. Pujol, D. Salazar Aponte and R. Le Riche (2010).
        On Object-Oriented Programming of Optimizers - Examples in Scilab. In P. Breitkopf and R. F. Coelho, eds.:
        Multidisciplinary Design Optimization in Computational Mechanics, Wiley, pp. 527-565;
        http://www.cmap.polytechnique.fr/~nikolaus.hansen/collette2010Chap14.pdf

    Example usage:
        while not opt.stop():
            x = opt.ask()
            y = f(x)
            opt.tell(x, y)
        return opt.best()
    """

    @abstractmethod
    def setup(self, dimensions: [any], recorder: DataRecorder) -> None:
        """
        Setup parameters given initial conditions of the candidate

        :param dimensions: list of search dimensions
        :param recorder: data recorder
        """
        pass

    @abstractmethod
    def stop(self) -> bool:
        """
        :return: True when the optimizer thinks it has reached a stopping point
        """
        pass

    @abstractmethod
    def ask(self, num: Optional[int] = None) -> [any]:
        """
        :param num: the number of search points to return. If undefined, the
            optimizer will choose how many to return.
        :return: a list of search points generated by the optimizer
        """
        pass

    @abstractmethod
    def tell(self, evaluations: [Tuple[float, float, any]]) -> None:
        """
        Updates the optimizer with the objective evaluations of a list of search points

        :param evaluations: a list of tuples of (evaluation, search point)
        """
        # NOTE(review): the type hint suggests 3-tuples while the docstring
        # says 2-tuples — confirm the element order against implementations.
        pass

    @abstractmethod
    def best_solution(self) -> Optional[Tuple[float, float, any]]:
        """
        :return: the current best solution and (estimated) score
        """
        # NOTE(review): tuple appears to be (score, evaluation, candidate) —
        # confirm with concrete subclasses.
        pass

    @abstractmethod
    def central_solution(self) -> (Optional[float], Optional[float], any):
        """
        :return: the current central solution and (estimated) score
        """
        pass

    def get_num_candidates(self) -> Optional[int]:
        """
        :return: Suggested number of candidates to ask for (for parallel asking),
            or None for no suggestion
        """
        # Non-abstract default: no suggestion.
        return None

    def get_candidate_block_size(self) -> int:
        """
        :return: number of candidates requested should be a multiple of this quantity
        """
        # Non-abstract default: any count is acceptable.
        return 1

    def get_num_dimensions(self) -> Optional[int]:
        """
        :return: number of dimensions being optimized over, or None if not
            implemented or applicable
        """
        return None
PypiClean
/ApiLogicServer-9.2.18-py3-none-any.whl/api_logic_server_cli/create_from_model/safrs-react-admin-npm-build/static/js/6304.ce792f5f.chunk.js
"use strict";(self.webpackChunkreact_admin_upgrade=self.webpackChunkreact_admin_upgrade||[]).push([[6304],{86304:function(e,t,o){o.r(t),o.d(t,{conf:function(){return n},language:function(){return i}});var n={comments:{lineComment:"//",blockComment:["/*","*/"]},brackets:[["{","}"],["[","]"],["(",")"]],autoClosingPairs:[{open:"{",close:"}"},{open:"[",close:"]"},{open:"(",close:")"},{open:'"',close:'"'},{open:"'",close:"'"},{open:"`",close:"`"}],surroundingPairs:[{open:"{",close:"}"},{open:"[",close:"]"},{open:"(",close:")"},{open:'"',close:'"'},{open:"'",close:"'"},{open:"`",close:"`"}]},i={defaultToken:"",tokenPostfix:".swift",identifier:/[a-zA-Z_][\w$]*/,attributes:["@GKInspectable","@IBAction","@IBDesignable","@IBInspectable","@IBOutlet","@IBSegueAction","@NSApplicationMain","@NSCopying","@NSManaged","@Sendable","@UIApplicationMain","@autoclosure","@actorIndependent","@asyncHandler","@available","@convention","@derivative","@differentiable","@discardableResult","@dynamicCallable","@dynamicMemberLookup","@escaping","@frozen","@globalActor","@inlinable","@inline","@main","@noDerivative","@nonobjc","@noreturn","@objc","@objcMembers","@preconcurrency","@propertyWrapper","@requires_stored_property_inits","@resultBuilder","@testable","@unchecked","@unknown","@usableFromInline","@warn_unqualified_access"],accessmodifiers:["open","public","internal","fileprivate","private"],keywords:["#available","#colorLiteral","#column","#dsohandle","#else","#elseif","#endif","#error","#file","#fileID","#fileLiteral","#filePath","#function","#if","#imageLiteral","#keyPath","#line","#selector","#sourceLocation","#warning","Any","Protocol","Self","Type","actor","as","assignment","associatedtype","associativity","async","await","break","case","catch","class","continue","convenience","default","defer","deinit","didSet","do","dynamic","dynamicType","else","enum","extension","fallthrough","false","fileprivate","final","for","func","get","guard","higherThan","if","import","in","indirect","infix
","init","inout","internal","is","isolated","lazy","left","let","lowerThan","mutating","nil","none","nonisolated","nonmutating","open","operator","optional","override","postfix","precedence","precedencegroup","prefix","private","protocol","public","repeat","required","rethrows","return","right","safe","self","set","some","static","struct","subscript","super","switch","throw","throws","true","try","typealias","unowned","unsafe","var","weak","where","while","willSet","__consuming","__owned"],symbols:/[=(){}\[\].,:;@#\_&\-<>`?!+*\\\/]/,operatorstart:/[\/=\-+!*%<>&|^~?\u00A1-\u00A7\u00A9\u00AB\u00AC\u00AE\u00B0-\u00B1\u00B6\u00BB\u00BF\u00D7\u00F7\u2016-\u2017\u2020-\u2027\u2030-\u203E\u2041-\u2053\u2055-\u205E\u2190-\u23FF\u2500-\u2775\u2794-\u2BFF\u2E00-\u2E7F\u3001-\u3003\u3008-\u3030]/,operatorend:/[\u0300-\u036F\u1DC0-\u1DFF\u20D0-\u20FF\uFE00-\uFE0F\uFE20-\uFE2F\uE0100-\uE01EF]/,operators:/(@operatorstart)((@operatorstart)|(@operatorend))*/,escapes:/\\(?:[abfnrtv\\"']|x[0-9A-Fa-f]{1,4}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})/,tokenizer:{root:[{include:"@whitespace"},{include:"@comment"},{include:"@attribute"},{include:"@literal"},{include:"@keyword"},{include:"@invokedmethod"},{include:"@symbol"}],whitespace:[[/\s+/,"white"],[/"""/,"string.quote","@endDblDocString"]],endDblDocString:[[/[^"]+/,"string"],[/\\"/,"string"],[/"""/,"string.quote","@popall"],[/"/,"string"]],symbol:[[/[{}()\[\]]/,"@brackets"],[/[<>](?!@symbols)/,"@brackets"],[/[.]/,"delimiter"],[/@operators/,"operator"],[/@symbols/,"operator"]],comment:[[/\/\/\/.*$/,"comment.doc"],[/\/\*\*/,"comment.doc","@commentdocbody"],[/\/\/.*$/,"comment"],[/\/\*/,"comment","@commentbody"]],commentdocbody:[[/\/\*/,"comment","@commentbody"],[/\*\//,"comment.doc","@pop"],[/\:[a-zA-Z]+\:/,"comment.doc.param"],[/./,"comment.doc"]],commentbody:[[/\/\*/,"comment","@commentbody"],[/\*\//,"comment","@pop"],[/./,"comment"]],attribute:[[/@@@identifier/,{cases:{"@attributes":"keyword.control","@default":""}}]],literal:[[/"/,{token:"st
ring.quote",next:"@stringlit"}],[/0[b]([01]_?)+/,"number.binary"],[/0[o]([0-7]_?)+/,"number.octal"],[/0[x]([0-9a-fA-F]_?)+([pP][\-+](\d_?)+)?/,"number.hex"],[/(\d_?)*\.(\d_?)+([eE][\-+]?(\d_?)+)?/,"number.float"],[/(\d_?)+/,"number"]],stringlit:[[/\\\(/,{token:"operator",next:"@interpolatedexpression"}],[/@escapes/,"string"],[/\\./,"string.escape.invalid"],[/"/,{token:"string.quote",next:"@pop"}],[/./,"string"]],interpolatedexpression:[[/\(/,{token:"operator",next:"@interpolatedexpression"}],[/\)/,{token:"operator",next:"@pop"}],{include:"@literal"},{include:"@keyword"},{include:"@symbol"}],keyword:[[/`/,{token:"operator",next:"@escapedkeyword"}],[/@identifier/,{cases:{"@keywords":"keyword","[A-Z][a-zA-Z0-9$]*":"type.identifier","@default":"identifier"}}]],escapedkeyword:[[/`/,{token:"operator",next:"@pop"}],[/./,"identifier"]],invokedmethod:[[/([.])(@identifier)/,{cases:{$2:["delimeter","type.identifier"],"@default":""}}]]}}}}]); //# sourceMappingURL=6304.ce792f5f.chunk.js.map
PypiClean
/FHIR%20Parser-0.1.5.tar.gz/FHIR Parser-0.1.5/fhir_parser/observation.py
import datetime
from typing import List, Union, Optional


class ObservationComponent:
    """An observation component object containing the details of a part of the observation"""

    def __init__(self, system: str, code: str, display: str,
                 value: Optional[Union[str, float]], unit: Optional[str]):
        self.system: str = system
        self.code: str = code
        self.display: str = display
        # BUG FIX: annotation previously said `float` although the parameter
        # (and callers) allow str or None as well.
        self.value: Optional[Union[str, float]] = value
        self.unit: Optional[str] = unit

    def quantity(self) -> str:
        """Pretty print of the value and unit for an observation.

        Returns:
            The value immediately followed by the unit with no separator,
            e.g. '76.0mm[Hg]'. A missing value renders as 'N/A'; a missing
            unit is omitted.
        """
        # (Docstring fixed: the previous example '76.0 mm[Hg]' showed a space
        # that this method has never produced.)
        value_part = str(self.value) if self.value is not None else 'N/A'
        unit_part = self.unit if self.unit is not None else ''
        return value_part + unit_part

    def __eq__(self, o: object) -> bool:
        # Strict type match: subclasses and other types compare unequal.
        if type(o) != ObservationComponent:
            return False
        return self.__dict__ == o.__dict__

    def __hash__(self) -> int:
        # BUG FIX: defining __eq__ without __hash__ made instances unhashable
        # (hash set to None). Hash over the same attributes __eq__ compares.
        return hash(tuple(sorted(self.__dict__.items())))

    def __str__(self) -> str:
        # 'display: valueunit' — same output shape as quantity(), prefixed
        # with the human-readable display name.
        return self.display + ': ' + self.quantity()


class Observation:
    """An observation object holding either one or more observation components"""

    def __init__(self, uuid: str, type: str, status: str, patient_uuid: str,
                 encounter_uuid: str,
                 effective_datetime: datetime.datetime,
                 issued_datetime: datetime.datetime,
                 components: List[ObservationComponent]):
        self.uuid: str = uuid
        # `type` shadows the builtin but is kept for interface compatibility.
        self.type: str = type
        self.status: str = status
        self.patient_uuid: str = patient_uuid
        self.encounter_uuid: str = encounter_uuid
        self.effective_datetime: datetime.datetime = effective_datetime
        self.issued_datetime: datetime.datetime = issued_datetime
        self.components: List[ObservationComponent] = components

    def __str__(self) -> str:
        # 'uuid | type | status | effective | issued | [comp1, comp2, ...]'
        return ' | '.join(map(str, [self.uuid, self.type, self.status,
                                    self.effective_datetime,
                                    self.issued_datetime,
                                    '[' + ', '.join(map(str, self.components)) + ']']))
PypiClean
/MezzanineFor1.7-3.1.10.tar.gz/MezzanineFor1.7-3.1.10/mezzanine/core/models.py
"""
Core abstract models for Mezzanine: site affiliation, slugs, meta data,
timestamps, publishable/displayable content, ordering, ownership and
per-site admin permissions.
"""

from __future__ import unicode_literals
from future.builtins import str
from future.utils import with_metaclass

from json import loads
# Python 3 moved urlopen/urlencode; fall back to the Python 2 locations.
try:
    from urllib.request import urlopen
    from urllib.parse import urlencode
except ImportError:
    from urllib import urlopen, urlencode

from django.contrib.contenttypes.generic import GenericForeignKey
from django.db import models
from django.db.models.base import ModelBase
from django.db.models.signals import post_save
from django.template.defaultfilters import truncatewords_html
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import strip_tags
from django.utils.timesince import timesince
from django.utils.timezone import now
from django.utils.translation import ugettext, ugettext_lazy as _

from mezzanine.core.fields import RichTextField, OrderField
from mezzanine.core.managers import DisplayableManager, CurrentSiteManager
from mezzanine.generic.fields import KeywordsField
from mezzanine.utils.html import TagCloser
from mezzanine.utils.models import base_concrete_model, get_user_model_name
from mezzanine.utils.sites import current_site_id, current_request
from mezzanine.utils.urls import admin_url, slugify, unique_slug


# Dotted path of the (possibly custom) user model, e.g. "auth.User".
user_model_name = get_user_model_name()


class SiteRelated(models.Model):
    """
    Abstract model for all things site-related. Adds a foreignkey to
    Django's ``Site`` model, and filters by site with all querysets.
    See ``mezzanine.utils.sites.current_site_id`` for implementation
    details.
    """

    objects = CurrentSiteManager()

    class Meta:
        abstract = True

    site = models.ForeignKey("sites.Site", editable=False)

    def save(self, update_site=False, *args, **kwargs):
        """
        Set the site to the current site when the record is first
        created, or the ``update_site`` argument is explicitly set
        to ``True``.
        """
        if update_site or (self.id is None and self.site_id is None):
            self.site_id = current_site_id()
        super(SiteRelated, self).save(*args, **kwargs)


@python_2_unicode_compatible
class Slugged(SiteRelated):
    """
    Abstract model that handles auto-generating slugs. Each slugged
    object is also affiliated with a specific site object.
    """

    title = models.CharField(_("Title"), max_length=500)
    slug = models.CharField(_("URL"), max_length=2000, blank=True, null=True,
            help_text=_("Leave blank to have the URL auto-generated from "
                        "the title."))

    class Meta:
        abstract = True

    def __str__(self):
        return self.title

    def save(self, *args, **kwargs):
        """
        If no slug is provided, generates one before saving.
        """
        if not self.slug:
            self.slug = self.generate_unique_slug()
        super(Slugged, self).save(*args, **kwargs)

    def generate_unique_slug(self):
        """
        Create a unique slug by passing the result of get_slug() to
        utils.urls.unique_slug, which appends an index if necessary.
        """
        # For custom content types, use the ``Page`` instance for
        # slug lookup.
        concrete_model = base_concrete_model(Slugged, self)
        slug_qs = concrete_model.objects.exclude(id=self.id)
        return unique_slug(slug_qs, "slug", self.get_slug())

    def get_slug(self):
        """
        Allows subclasses to implement their own slug creation logic.
        """
        return slugify(self.title)

    def admin_link(self):
        """HTML link to the object's live URL, for use in the admin."""
        return "<a href='%s'>%s</a>" % (self.get_absolute_url(),
                                        ugettext("View on site"))
    admin_link.allow_tags = True
    admin_link.short_description = ""


class MetaData(models.Model):
    """
    Abstract model that provides meta data for content.
    """

    _meta_title = models.CharField(_("Title"), null=True, blank=True,
        max_length=500,
        help_text=_("Optional title to be used in the HTML title tag. "
                    "If left blank, the main title field will be used."))
    description = models.TextField(_("Description"), blank=True)
    gen_description = models.BooleanField(_("Generate description"),
        help_text=_("If checked, the description will be automatically "
                    "generated from content. Uncheck if you want to manually "
                    "set a custom description."), default=True)
    keywords = KeywordsField(verbose_name=_("Keywords"))

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        """
        Set the description field on save.
        """
        if self.gen_description:
            self.description = strip_tags(self.description_from_content())
        super(MetaData, self).save(*args, **kwargs)

    def meta_title(self):
        """
        Accessor for the optional ``_meta_title`` field, which returns
        the string version of the instance if not provided.
        """
        return self._meta_title or str(self)

    def description_from_content(self):
        """
        Returns the first block or sentence of the first content-like
        field.
        """
        description = ""
        # Use the first RichTextField, or TextField if none found.
        for field_type in (RichTextField, models.TextField):
            if not description:
                for field in self._meta.fields:
                    if (isinstance(field, field_type) and
                            field.name != "description"):
                        description = getattr(self, field.name)
                        if description:
                            # Deferred import to avoid a circular import
                            # with the template tags module.
                            from mezzanine.core.templatetags.mezzanine_tags \
                                import richtext_filters
                            description = richtext_filters(description)
                            break
        # Fall back to the title if description couldn't be determined.
        if not description:
            description = str(self)
        # Strip everything after the first block or sentence.
        ends = ("</p>", "<br />", "<br/>", "<br>", "</ul>",
                "\n", ". ", "! ", "? ")
        for end in ends:
            pos = description.lower().find(end)
            if pos > -1:
                description = TagCloser(description[:pos]).html
                break
        else:
            # No sentence/block terminator found: truncate by word count.
            description = truncatewords_html(description, 100)
        return description


class TimeStamped(models.Model):
    """
    Provides created and updated timestamps on models.
    """

    class Meta:
        abstract = True

    created = models.DateTimeField(null=True, editable=False)
    updated = models.DateTimeField(null=True, editable=False)

    def save(self, *args, **kwargs):
        """
        Refresh ``updated`` on every save; set ``created`` only on the
        first save (when no primary key exists yet).
        """
        _now = now()
        self.updated = _now
        if not self.id:
            self.created = _now
        super(TimeStamped, self).save(*args, **kwargs)


CONTENT_STATUS_DRAFT = 1
CONTENT_STATUS_PUBLISHED = 2
CONTENT_STATUS_CHOICES = (
    (CONTENT_STATUS_DRAFT, _("Draft")),
    (CONTENT_STATUS_PUBLISHED, _("Published")),
)

# Marker stored in ``Displayable.short_url`` when no URL-shortening
# service is configured (see ``Displayable.set_short_url``).
SHORT_URL_UNSET = "unset"


class Displayable(Slugged, MetaData, TimeStamped):
    """
    Abstract model that provides features of a visible page on the
    website such as publishing fields. Basis of Mezzanine pages,
    blog posts, and Cartridge products.
    """

    status = models.IntegerField(_("Status"),
        choices=CONTENT_STATUS_CHOICES, default=CONTENT_STATUS_PUBLISHED,
        help_text=_("With Draft chosen, will only be shown for admin users "
                    "on the site."))
    publish_date = models.DateTimeField(_("Published from"),
        help_text=_("With Published chosen, won't be shown until this time"),
        blank=True, null=True)
    expiry_date = models.DateTimeField(_("Expires on"),
        help_text=_("With Published chosen, won't be shown after this time"),
        blank=True, null=True)
    short_url = models.URLField(blank=True, null=True)
    in_sitemap = models.BooleanField(_("Show in sitemap"), default=True)

    objects = DisplayableManager()
    # Field weights used by Mezzanine's search machinery.
    search_fields = {"keywords": 10, "title": 5}

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        """
        Set default for ``publish_date``. We can't use ``auto_now_add`` on
        the field as it will be blank when a blog post is created from
        the quick blog form in the admin dashboard.
        """
        if self.publish_date is None:
            self.publish_date = now()
        super(Displayable, self).save(*args, **kwargs)

    def get_admin_url(self):
        """URL of this object's change view in the admin."""
        return admin_url(self, "change", self.id)

    def publish_date_since(self):
        """
        Returns the time since ``publish_date``.
        """
        return timesince(self.publish_date)
    publish_date_since.short_description = _("Published from")

    def get_absolute_url(self):
        """
        Raise an error if called on a subclass without
        ``get_absolute_url`` defined, to ensure all search results
        contains a URL.
        """
        name = self.__class__.__name__
        raise NotImplementedError("The model %s does not have "
                                  "get_absolute_url defined" % name)

    def get_absolute_url_with_host(self):
        """
        Returns host + ``get_absolute_url`` - used by the various
        ``short_url`` mechanics below.

        Technically we should use ``self.site.domain``, here, however
        if we were to invoke the ``short_url`` mechanics on a list of
        data (eg blog post list view), we'd trigger a db query per
        item. Using ``current_request`` should provide the same
        result, since site related data should only be loaded based
        on the current host anyway.
        """
        return current_request().build_absolute_uri(self.get_absolute_url())

    def set_short_url(self):
        """
        Generates the ``short_url`` attribute if the model does not
        already have one. Used by the ``set_short_url_for`` template
        tag and ``TweetableAdmin``.

        If no sharing service is defined (bitly is the one implemented,
        but others could be by overriding ``generate_short_url``), the
        ``SHORT_URL_UNSET`` marker gets stored in the DB. In this case,
        ``short_url`` is temporarily (eg not persisted) set to host +
        ``get_absolute_url`` - this is so that we don't permanently
        store ``get_absolute_url``, since it may change over time.
        """
        if self.short_url == SHORT_URL_UNSET:
            self.short_url = self.get_absolute_url_with_host()
        elif not self.short_url:
            self.short_url = self.generate_short_url()
            self.save()

    def generate_short_url(self):
        """
        Returns a new short URL generated using bit.ly if credentials
        for the service have been specified.
        """
        from mezzanine.conf import settings
        settings.use_editable()
        if settings.BITLY_ACCESS_TOKEN:
            url = "https://api-ssl.bit.ly/v3/shorten?%s" % urlencode({
                "access_token": settings.BITLY_ACCESS_TOKEN,
                "uri": self.get_absolute_url_with_host(),
            })
            response = loads(urlopen(url).read().decode("utf-8"))
            if response["status_code"] == 200:
                return response["data"]["url"]
        return SHORT_URL_UNSET

    def _get_next_or_previous_by_publish_date(self, is_next, **kwargs):
        """
        Retrieves next or previous object by publish date. We implement
        our own version instead of Django's so we can hook into the
        published manager and concrete subclasses.
        """
        arg = "publish_date__gt" if is_next else "publish_date__lt"
        order = "publish_date" if is_next else "-publish_date"
        lookup = {arg: self.publish_date}
        concrete_model = base_concrete_model(Displayable, self)
        try:
            queryset = concrete_model.objects.published
        except AttributeError:
            # Manager without a ``published`` method: fall back to all().
            queryset = concrete_model.objects.all
        try:
            return queryset(**kwargs).filter(**lookup).order_by(order)[0]
        except IndexError:
            # No neighbouring object: implicitly return None.
            pass

    def get_next_by_publish_date(self, **kwargs):
        """
        Retrieves next object by publish date.
        """
        return self._get_next_or_previous_by_publish_date(True, **kwargs)

    def get_previous_by_publish_date(self, **kwargs):
        """
        Retrieves previous object by publish date.
        """
        return self._get_next_or_previous_by_publish_date(False, **kwargs)


class RichText(models.Model):
    """
    Provides a Rich Text field for managing general content and
    making it searchable.
    """

    content = RichTextField(_("Content"))

    search_fields = ("content",)

    class Meta:
        abstract = True


class OrderableBase(ModelBase):
    """
    Checks for ``order_with_respect_to`` on the model's inner ``Meta``
    class and if found, copies it to a custom attribute and deletes it
    since it will cause errors when used with ``ForeignKey("self")``.
    Also creates the ``ordering`` attribute on the ``Meta`` class if
    not yet provided.
    """

    def __new__(cls, name, bases, attrs):
        if "Meta" not in attrs:
            class Meta:
                pass
            attrs["Meta"] = Meta
        if hasattr(attrs["Meta"], "order_with_respect_to"):
            # Move the option from Meta onto the model class itself.
            order_field = attrs["Meta"].order_with_respect_to
            attrs["order_with_respect_to"] = order_field
            del attrs["Meta"].order_with_respect_to
        if not hasattr(attrs["Meta"], "ordering"):
            setattr(attrs["Meta"], "ordering", ("_order",))
        return super(OrderableBase, cls).__new__(cls, name, bases, attrs)


class Orderable(with_metaclass(OrderableBase, models.Model)):
    """
    Abstract model that provides a custom ordering integer field
    similar to using Meta's ``order_with_respect_to``, since to
    date (Django 1.2) this doesn't work with ``ForeignKey("self")``,
    or with Generic Relations. We may also want this feature for
    models that aren't ordered with respect to a particular field.
    """

    _order = OrderField(_("Order"), null=True)

    class Meta:
        abstract = True

    def with_respect_to(self):
        """
        Returns a dict to use as a filter for ordering operations
        containing the original ``Meta.order_with_respect_to`` value
        if provided. If the field is a Generic Relation, the dict
        returned contains names and values for looking up the
        relation's ``ct_field`` and ``fk_field`` attributes.
        """
        try:
            name = self.order_with_respect_to
            value = getattr(self, name)
        except AttributeError:
            # No ``order_with_respect_to`` specified on the model.
            return {}
        # Support for generic relations.
        field = getattr(self.__class__, name)
        if isinstance(field, GenericForeignKey):
            names = (field.ct_field, field.fk_field)
            return dict([(n, getattr(self, n)) for n in names])
        return {name: value}

    def save(self, *args, **kwargs):
        """
        Set the initial ordering value.
        """
        if self._order is None:
            # Append at the end of the sibling group.
            lookup = self.with_respect_to()
            lookup["_order__isnull"] = False
            concrete_model = base_concrete_model(Orderable, self)
            self._order = concrete_model.objects.filter(**lookup).count()
        super(Orderable, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        """
        Update the ordering values for siblings.
        """
        # Shift every later sibling down one position.
        lookup = self.with_respect_to()
        lookup["_order__gte"] = self._order
        concrete_model = base_concrete_model(Orderable, self)
        after = concrete_model.objects.filter(**lookup)
        after.update(_order=models.F("_order") - 1)
        super(Orderable, self).delete(*args, **kwargs)

    def _get_next_or_previous_by_order(self, is_next, **kwargs):
        """
        Retrieves next or previous object by order. We implement our
        own version instead of Django's so we can hook into the
        published manager, concrete subclasses and our custom
        ``with_respect_to`` method.
        """
        lookup = self.with_respect_to()
        lookup["_order"] = self._order + (1 if is_next else -1)
        concrete_model = base_concrete_model(Orderable, self)
        try:
            queryset = concrete_model.objects.published
        except AttributeError:
            queryset = concrete_model.objects.filter
        try:
            return queryset(**kwargs).get(**lookup)
        except concrete_model.DoesNotExist:
            # No neighbouring object: implicitly return None.
            pass

    def get_next_by_order(self, **kwargs):
        """
        Retrieves next object by order.
        """
        return self._get_next_or_previous_by_order(True, **kwargs)

    def get_previous_by_order(self, **kwargs):
        """
        Retrieves previous object by order.
        """
        return self._get_next_or_previous_by_order(False, **kwargs)


class Ownable(models.Model):
    """
    Abstract model that provides ownership of an object for a user.
    """

    user = models.ForeignKey(user_model_name, verbose_name=_("Author"),
                             related_name="%(class)ss")

    class Meta:
        abstract = True

    def is_editable(self, request):
        """
        Restrict in-line editing to the objects's owner and superusers.
        """
        return request.user.is_superuser or request.user.id == self.user_id


class SitePermission(models.Model):
    """
    Permission relationship between a user and a site that's
    used instead of ``User.is_staff``, for admin and inline-editing
    access.
    """

    user = models.ForeignKey(user_model_name, verbose_name=_("Author"),
                             related_name="%(class)ss", unique=True)
    sites = models.ManyToManyField("sites.Site", blank=True,
                                   verbose_name=_("Sites"))

    class Meta:
        verbose_name = _("Site permission")
        verbose_name_plural = _("Site permissions")


def create_site_permission(sender, **kw):
    """
    ``post_save`` handler that grants staff (non-superuser) users a
    ``SitePermission`` for the current site when they have none.
    """
    sender_name = "%s.%s" % (sender._meta.app_label, sender._meta.object_name)
    if sender_name.lower() != user_model_name.lower():
        return
    user = kw["instance"]
    if user.is_staff and not user.is_superuser:
        perm, created = SitePermission.objects.get_or_create(user=user)
        if created or perm.sites.count() < 1:
            perm.sites.add(current_site_id())

# We don't specify the user model here, because with 1.5's custom
# user models, everything explodes. So we check the name of it in
# the signal.
post_save.connect(create_site_permission)
PypiClean
/ArticutAPI_Taigi-0.94-py3-none-any.whl/ArticutAPI_Taigi/defaultDict/moe_dict/ACTION_verb.py
moe_ActionVerb = ["了","刁","卜","了工","入木","刁古董","了本","卜卦","入厝","了錢","上","下","乞","亡","叉","大","干","弓","上山頭","上水","大心氣","大主大意","上北","下本","久仰","大舌","久見","上青苔","大便","三思","上桌","大氣喘","上崎","大細心","大摒掃","上殕","弓開","土想","大腹肚","大漢","大聲","中","仆","允","冗","勼","允人","公分","勼手","勼水","勻仔是","中立","分伻","分別","不服","中計","勼跤","分錢","化","反","歹","反天","文文仔笑","反爿","反白睚","歹死","欠血","反肚","反車","文身","比並","比武","毋知","毋知影","方便","毋挃","毋是","止枵","反背","毋值","毋准","毋捌","反桌","反症","少缺","反船","手術","止喙焦","心悶","反腹","反僥","止嗽","引魂","比論","允頭路","反輾轉","毋願","毋驚","火燒山","火燒厝","火燒埔","乍","仕","以","充公","仙拚仙","主婚","代筆","主意","凹","刊","卯","可","囚","央","出丁","出口","叫毋敢","出外","司奶","出帆","半信半疑","叫客","叫是","四界趖","加倍","出珠","出酒","出張","四淋垂","出喙","四散","叫菜","出業","凹落去","加話","包餡","出膿","出癖","包穡頭","包贌","夯","奶","扒","打","失人禮","打扎","夯枷","失敗","正著時","扒飯","失電","扒癢","失覺察","犯","甘","生","由","甲","申","白","立","用心","生囝","生囡仔","生卵","生活","白食","申冤","生做","生粒仔","白翎鷥","生湠","生菇","犯著","白煠","生話","生鉎","白講","生癬","任","份","休","伨","光","先知","共","冰","刐","刑","划","列","同","向","在","合力","在人","吐大氣","吊大筒","回心轉意","合用","吐吐","吐舌","吐肉箭","再版","合股","向前","向望","吊脰","向善","合掌","吊猴","向腰","吊鼎","回魂","吐憐涎","吊癀","吐穗","好","如","安","安土","存心","安心","安份","安宅","安床","多事","守空房","存後步","地動","好喙","安葬","守暝","存範","多謝","成","托","扞","扦","成人","收山","收水","成功","收冬","曲去","成年","收束","有身","有底","收泔","成物","扞家","成做","收喙","早睏","有雄","有歲","曲跤","扞鼎灶","扞數","收數","扞盤","死諍","收擔","有膭","扞頭","扛轎","老","聿","肉","行","行春","行軍","行氣","自強","行徙","行透透","行棋","行短路","自新","行路","自謙","伴","伻","克","免","伸勻","伸勼","作文","作田","伸長手","伴娶","免費","作業","伸跤","作穡","作孽","免驚","刜","刣","別","君","吠","吱","吶","吼","刣死","吵抐","吹狗螺","含冤","含梢","判罪","吭跤翹","刣頭","囥","囫","坉","坉平","坐位","囥步","坐桌","坐清","坉塗","囥歲","坐監","坐數","困難","妝","孝","完","孝孤","完婚","完結","形","忌","扱","扲","扴","扷","扶𡳞脬","扭尻川","忌床","弄風","忍氣","扮笑面","忌喙","弄喙花","扭搦","弄新娘","扳過來","弄鐃","抉","把","抌","抐","抔","投胎","改酒","改途","找錢","折舊","束","步","沃","沉","沐","沕","決心","沃水","沐手","沐水","沖犯","沐沐泅","沉底","沃肥","沃花","沃雨","汰衫","沙微","束腰","步輦","沃澹","牢","狂","皂","育囝","育囡仔","私通","育飼","禿額","足","車","邪","赤目","走色","走味","走相掠","走相逐","辛苦","走揣","走傱","走精","並","佬","佮","佯
","使","佯生","使目尾","使目箭","使弄","來來去去","使性地","來洗","佮喙","佯痟","佮意","供體","佯戇","侗戇","刺","刻","剁","匼","受","刻印仔","到地","刺花","受苦","受氣","卸貨","卸貨底","刺膨紗","協議","呸","命","呾","和","咒","囷","呸血","咇咇掣","呿呿嗽","呵咾","呸面","呼音","咖哩嗹囉","呼蛋","呸痰","咒誓","呼噎仔","呼噓仔","呼觱仔","呸瀾","咒讖","坱","奅","姑情","定","屈","帕","定去","定性","定做","定著","怪","戽","怪人","戽水","延延","怦怦喘","承","抨","抾","拑","放刁","放工","放冗","承水","抾水","拍歹","拆日仔","拍毋見","拍手銃","拍尻川","拍石","拚生死","拚生理","拈田嬰","拍生驚","拍交落","抾囡仔","招囝婿","拆字","拚血","放卵","拍呃","拘束","拑牢牢","拋捙輪","拖命","放帖仔","拍官司","拚性命","拍拍","拋拋走","拍抐涼","拆股","拍金仔","拋近路","拚俗","拍咳啾","抾客","放屎","押後","抾恨","放毒","拍派","披衫","放重利","抹面","放風聲","拑家","拍拳","拍拳頭","放捒","抾柴","抹粉","放粉鳥","拗紙","招翁","放臭屁","抾骨","拆票","放符仔","拋魚","拍麻雀","拍喙鼓","拆單","拖棚","拍無去","抾稅","拍結毬","抾著","拄著","放債","拄搪","招會仔","放煙火","拍滂泅","拋碇","拗裒","放裒","拍算盤","拆腿","拋網","招魂","拍噗仔","拄數","放數","抹壁","拖磨","拚輸贏","抾錢","抽頭","放聲","拋輾斗","拍翸","抵額","拆藥仔","拋麒麟","拌蠓仔","拍觸衰","拍鐵","拍鱗","抹鹽","抽鬮","拈鬮","拗鬱","抾襇","抽躼","昏","東","昏去","東筊","明講","沬","泅","泏","泅水","治枵","沓滴","炕","注文","炕肉","注音","泡茶","炕菜頭","炊粿","炕窯","泡麵","直","知位","直透","知輕重","知影","芟","芡","花","罔行","股東","芡芳","罔度","糾筋","罔飼","虯","表","軋","近","迒","金金看","迎鬧熱","迎燈","侹","便看","剉","信用","保守","剉柴","俗賣","信篤","咯","咻","哀","品","哈","垂","咯血","品明","哈唏","咬喙齒根","咯痰","奕","姦撟","奏樂","封","封釘","封喙","度晬","思","急","拜公媽","拭尻川","恬恬","拜拜","拜候","拜堂","怨慼","拹","挂","挃","挌","挓","挔","挕","施","是","拹水","挓火","指指揬揬","挂紙","挕掉","按額","枵","架","染","染布","架跤","查數","歪","毒","洘","津","洗身軀","洞房","洘流","洗衫","洗面","歪哥","洗喙","洗盪","派","流","炤","炰","為","流目屎","炸油","流凊汗","活動","流湯","流鼻","流鼻水","流瀾","相","盹","相干","相欠債","相出路","相好","相刣","相告","相命","相拄","相拍","相招","相拍電","相倚","相借問","相唚","相捌","相迵","相送","相閃身","相閃車","相偃","相舂","相連紲","相尋","相換","相會","相楗","畏熱","相請","相諍","相激","相瞞","相輸","盹龜","相辭","相嚷","相觸","相攬","相讓","省","砉","研","看人無","省工","看日","看出出","省立","看有","看有起","省事","看衰","看袂起","看現現","看覓","看款","看無","看無起","看會起","看輕","看樣","看頭","耎","耐","背","胎","背冊","紅目","致身命","致使","背約","約會","致蔭","苛","苦","苴","苦毒","計","重","閂","重食","重耽","計畫","重新","要緊","限數","重錢","革","食","香","食人","食力","食奶","食名","食色","食苦","食倯","食家己","食桌","風神","食秤頭","食晝
","食菜","面會","食暗","食補","食漿","食褒","食錢","風聲","食臊","食虧","食薰","風騷","修面","倖","倩","值","冤","借人","凊心","倒向","凌治","冤家","冤家量債","候脈","倒剾","倒會仔","倒摔向","倚壁","倒擔","倒頭行","倒頭栽","凍霜","倒覆","借蹛","凍露水","勍","哺","哼","唔","唚","哭爸","哭枵","哼哼叫","唚喙","哽著","套","套話","容","展","崁","差","容允","展威","展風神","家婆","差教","崁蓋","展寶","展覽","恭","挈","挐","挩","挨米","挩門","挨絃仔","挩窗","挨粿","挵","挼","挽","捀","捌","捒","捘","捙","揤","敆","料","捀斗","捀水","車布邊","捌字","捒走","捙拚","挽花","挵門","挽面","捙畚斗","挵破","挽脈","挽茶","挽草","捀茶","捒做堆","料理","捌貨","挽喙齒","捙跋反","捌想","揤電鈴","挵鼓","料算","捙盤","捆縛","捎錢","捋頭毛","捙輾斗","敆藥仔","挵鐘","捏麵尪仔","晟","栓","框","校對","晟養","梳","欱","氣","浡","烌","消水","烘火","烏有","烘肉","烏西","消定","消風","疶","泄尿","疶屎","病相思","疼痛","眠一下","砧皮鞋","眩車","破柴","眨眨𥍉","眩船","破腹","眠夢","神","神去","笑咍咍","租厝","紡","缺","罟","耙","胮","素食","舀水","臭火焦","航空","臭風","臭殕","袂直","袂赴","袂記得","袂堪得","袂曉","記","起","迵","迷","迸","逆","起𪁎","討人情","送上山","起手","逆天","送日仔","討皮疼","退冰","起色","起行","起狂","討命","起呸面","退定","送定","起性地","退股","訕削","討契兄","退後","討食","起厝","討海","退酒","退婚","起無空","起痟","起童","退童","起雄","起碇","起落","起鼓","起價","記數","訕潲","退熱","退癀","送禮","起雞母皮","針","鬥","鬥空","鬥股","鬥相共","閃風","釘根","陣疼","鬥陣","鬥無閒","閃著","鬥跤手","鬥夥計","鬥榫頭","酒醉","鬥鬧熱","鬥幫贊","釘點","偃","假","偏","偝","假𠢕","做人情","做大水","偏心","做歹","停手","做月內","假仙","做司公","做功德","剪布","做生日","做生理","做伙","做旬","做色","做忌","做店面","做客","偷食","偷食步","做風颱","偃倒","做臭人","做鬼","偷揜","假無意","假痟","做痢","停跤","做瘦氣","做親情","做頭","做戲","做醮","做議量","做譴損","勒","務","參","唌","唌人","啉水","啟示","區別","唬秤頭","啉茶","啉酒","啉湯","參詳","動箸","啄龜","啖糝","唱聲","圇","培","堅","堅巴","夠水","堅疕","娶某","堅凍","堅乾","娶新娘","培墓","夠額","專","密告","將軍","寄話","張","得人疼","張老","帶身命","張身勢","強制","張持","張掇","張鳥鼠","張等","得勢","強摃","帶膭","徙","徛","從","惜","惝","戛","挲","捥","戛火","惜皮","徛名","惜囝","徙位","捨施","惜面皮","徙栽","挲草","徛起","徛黃","挲圓","挲圓仔湯","徙跤","徛算","徙鋪","徛燈篙","徛靈","挲鹽","捽","捾","掖","掘","掜","探","掮","敗","救人","捾水","教冊","敗市","教育","掩咯雞","掩崁","掖秧仔","接接","接喙","掠猴","掠痧","掠筋","掃塗跤","掛意","探墓厝","掖種","敗價","推撨","掠龍","敗露","斜","旋","旋藤","欶","涼","欶水","欶奶","條直","欲知","淋","淘","清","烰","淹大水","淋水","清火","添油香","添話","清數","焐","圈","犁","現","牽公","牽手","牽牛","牽成","牽尪姨","牽抾","牽師仔","牽粉","牽罟","現胸","牽絲","
牽羹","牽羅經","牽藤","理","產","眯","眼","疏開","理論","祭","窒","祭孤","窒倒街","窒喙空","祭獻","紩","紲","羞","紹介","紩衫","紲喙尾","粒積","紮錢","舂","脫手","脫身","脫箠","脫輪","莫","處置","術","袋","袚","規","覓","豉","設局","豉膎","設緣投","豉鹽","販","貫","赦","跂","透","通","逞","連","透雨","連累","販貨","跂跤","造話","造路","頂","陪綴","麻","麻醉","傍","勞","喀","喈","喋","喢","勞力","喘大氣","割肉","喝咻","喝玲瑯","喝拳","割貨","喝魚仔","割喉","創景","喙焦","喉滇","勞煩","喀痰","割稻仔","喨","圍","堪","報","報冤","圍爐","寒","尊","就","就近","就是","寒著","悲哀","悲傷","惡","惱","愖","掌","掔","掣","揈","擗","揌","捶心肝","掰手面","插代誌","插花仔","惱氣","插胳","揀茶","插喙","插牌","揀菜","插潲","提頭","揜","揣","揬","揲","摒","散","敧","敨","散工","敨中氣","摒本","敨氣","散陣","摒掃","摒貨底","敨開","散會","散學","揣頭路","欺","款","渧","減省","款勸","渴","湠","湠開","湠種","焙","焠","無","牚","猌","無下落","無去","無半項","無步","焦洗","無差","無眠","無夠月","無彩","無望","無細膩","無閒","無傳","無愛","牚頭","牚懸","痚","痟","痠","畫尪仔","發角","發性地","畫符仔","發喙齒","發穎","發癀","硞","硩","稅","睏坦敧","硩定","稅厝","睏晝","硞著","硩落去","硩嗽","硩驚","筅","絕","絲","結子","結果","結冤","絕氣","結趼","答喙鼓","絕路","結綵","結數","結穗","翕","舒","翕死","翕豆菜","翕相","脹氣","舒被","著","裂","覕","訴","著水蛆","著生驚","著災","覕雨","著咳嗾","著急","著病","覕喙","著寒熱仔","著猴","裂開","著傷","著銃","著頭","裁縫","診斷","著蟲","著驚","註","詈","評","貯","貺","費","貿","越","貼人食","貿工","貼本","超生","註好好","註死","趁私奇","買命","超度","趁食","貼貼","貯飯","註解","趁錢","越頭","跋","跍","跔","跙","跙一倒","進前","跋倒","跙倒","跑馬","跋桮","跋牌仔","跋筊","鈃","鈕","開市","開查某","開面","開桌","開基","開喙","開脾","開開","鈕鈕仔","酥腰","開錢","開臊","開藥仔","順","飲","黃","順風","順路","傱","剺","剾","剿","嗄","剾皮","剺肚","剾削","剾洗","剾風","剺破","剷草","傱錢","嗙","塌本","填本","嗚呼","填海","塌跤","塌錢","幌","嫁查某囝","嫁翁","感","戥","揫","惹代誌","愛媠","感著","意愛","感應","搐","搙","搝","搟","搢","搣","搤","浞","搦","搩","搪","搵","摃","搝大索","搵水","搖尾","搵豆油","搜身軀","搭油","搤空","搧風","搬厝","損神","斟茶","敬酒","斟酒","搢做前","搬徙","搧喙䫌","搪著","損傷","搬話","新興","搶頭香","搶頭標","搬戲","搖櫓","摃鐘","搦權","暗","會","楔","楗","楦","會同","楔空","楔後手","會計","暗崁","會做得","會堪得","會曉","楔縫","準","溢","歇工","歇冬","溢刺酸","準拄好","歇晝","歇寒","歇暗","歇暝","歇熱","滇","滒","煏","煞","煠","煡","煞心","煞去","煞尾","煏油","煏空","煏桌","煞著","煞鼓","煞戲","煎藥仔","照鏡","睨","當值","當選","碎","稟","禁氣","禁喙","筧","節","罩","節力","節脈","經絲","經跤經手","經過","罩雺","經線","罩霧","經驗","落下頦","落手","落本","落色","落
車","落南","落屎","落胎","落風","落崎","落第","落船","落雪","落喉","落湳","落葬","落價","落褲","腫頷","落霜","號","號名","號做","解心悶","補破網","補紩","補喙齒","解愁","詬詬唸","補鼎","試鹹汫","詼","趒","跟","跩","畫虎𡳞","詳細","跟綴","較","遏","過心","過戶","過手","過日","遏手把","過目","過名","過年","較車","過往","過房","遏泔","過面","過時","運氣","過氣","跳針","跳逝","過喙","跳港","跳童","遇著","過意","過數","過磅","過爐","逼籤詩","鉤","閘","閘日","閘光","鉤耳","閘車","閘風","銃殺","閘路","頓","飼","髡","頓手","飼奶","頓印","隔界","隔間","飽滇","隔暝","電頭毛","頓龜","像","僥","厭","嗽","嗾","呲","嘔","厭𤺪","僥心","嘔紅","嗾著","僭話","嘛嘛吼","嘐潲","團","墊","夢","團圓","對","幔","廕","對分","對半","對立","實行","對沖","孵岫","對拄","對拗","對指","對看","對時","對喙","對換","對照","實鼻","對數","孵膿","摖","摠","摧","摔粟","慣勢","摠頭","摵","敱","斡","敲油","摺衫","敲電話","滿","滿月","滲尿","滲屎","漆","漉","漚","漩","熁","漂白","漩尿","漉喙","漏稅","演戲","演講","漲懸價","漚鹹菜","盡","碭","箍","管","綜合","管待","精差","種珠","精神","管數","管顧","網","綴","緊行","罰徛","綴會仔","綴路","舞","蓄","蓄厝","蓋被","蓄嫁粧","膏膏纏","蜜","蜷","蝕","蝕本","蝕重","誘","誤","賒","趖","認份","說多謝","誤會","認路","賒數","說謝","踅","鉸","鉼","遛手","遛皮","遛疕仔","踅玲瑯","踅街","踅踅唸","輕薄","踅輾轉","銬","頕","頕低","頕頭","鼻","鼻芳","儉","劌","儉儉仔用","儉錢","噗","噓尿","嘻嘻嘩嘩","噴點","噗薰","審","屧","影","影目","慼","戮","撆","撋","撏","撚","撟","撠","撆手䘼","撒豆油","撠刺","撚匼笑","戮空","撞突","撒胡椒","憢疑","撚骰仔","撆褲跤","撒鹽","撨","撫","數","撥工","佈田","數念","撩油","暫度","撨時鐘","數想","撨摵","佈稻仔","撥駕","暴穎","標","漿","漿泔","潑雨","漿洗","漿衫","標會仔","潤","澍","濆","熟","熥","熟似","獎","皺","盤","盤車","盤喙錦","盤話","盤撋","盤數","瞌","磅","窮","磅米芳","磅重","磕著","糊","糋","緟","緩","糊人","練痟話","蔫","蔭","褙","衝突","衝動","褒嗦","諍","論","賤","請人客","請安","請坐","踏差","請桌","請神","賭強","踢被","諍喙","踏話頭","賞罰","輪","遨","醉","遮日","適合","輪值","踩街","遮閘","輪鬮","銷","鋏","閬","閬工","閬縫","霆","鞏","餓","靠山","駛車","駛船","靠著","靠勢","養飼","霆雷公","駕駛","鬧台","鬧房","鬧廳","凝","凝血","噤喙","壅","學","學人","學工夫","壅田","壅肥","學話","懍","戰","撼","擇","擉","擔","擛","整","擔水","擛手","擔肥","擋咧","擔屎","擛風","操勞","整頓","擂鼓","擉算盤","擔輸贏","橐","歕","激","激力","激心","歕火","激外外","橫扴","激空","激屎","激派頭","歕風","激面腔","激氣","激酒","激骨","激腦","歕觱仔","歕雞胿","歕雞胿仔","燃","燖","燜","甍","燒金","燒香","燃柴","燒著","燜飯","磨","磨粿","積德","篡","篦","篩","縋","縖","縛","縖裙","縛跤","縛粽","興","膭水","膨疱","興酒","翱翱輾","蝹","褫","褪皮","褪赤跤","褪衫","褪殼","褫開","褪腹裼","褪齒"
,"褪褲","褪褲𡳞","諞","謔","賰","賴","蹁","親手","親目","親密","親喙","親愛","躽","遹","遺","鋸","扮公伙仔","遵命","錚","錯手","隨在","隨身","隨意","隨緣","嚓","優先","嚓嚓趒","應","擘","擢","擠𤶃仔","壓味","擘金","應喙","應喙應舌","戴帽仔","擘開","應話","擤鼻","檢點","溼","濫","營","燥水","濫使","盪","瞪力","瞪屎","糝","縮","縫衫","糝粉","縮茶心","縫紩","總貿","總管","繃","罾","臆","臨","薅","臆出出","臨急","薅草","舉荐","罾魚","聳勢","聳鬚","謝","講白賊","講和","賺食","講破","講笑","講笑詼","講起","講情","謝絕","講嘐潲話","講演","講價","講親情","蹊蹺","謝願","還","鍊","鍤","還數","隱","隱弓蕉","黜","甪","點","鼾","點穴","黜破","黜臭","點眼","點醒","嚙","壘","擸","擽","攄","擲㧒捔","擽呧","斷路","攄頭毛","濺","瀉","爁","爁日","穢人","穢涗","繏","繐","繚","翸","繡花","翻厝","翻草","翻新","繏腰","翻頭","繏嚨喉","藃","蟯","覆","藏水沬","蟯蟯趖","蹔","蹛","軁","轉大人","轉外家","軁狗空仔","轉後頭","轉骨","轉喙","謹慎","蹔跤步","轉斡","轉踅","轉輪","轉聲","轉臍","蹧躂","軁鑽","醫","醬","鎖","鎮","離手","鎮地","離別","鎖門","雙棚鬥","鎮煞","鎮路","離緣","題","颺","餾","颺風","颺粟","餾粿","題緣","颺颺飛","攏","曝日","寵倖","曝粟","攏褲","攏權","曝鹽","爍","礙著","簸","繩","穩心仔","穩定","辭","辭別","辭頭路","贊聲","鏨","霧","關門","關童","鏨頭","騙囡仔","嚷","勸和","寶惜","攕","瀳","獻計","礤","礪","礬","礤簽","觸","躄","辮","觸舌","譬相","譬論講","饒赦","嚾","攑","攝","櫼","灇","爛","攑手","攝屎","灌風","攑箸","攑頭","攑懸","攝襇","纏綴","纏線","辯","露","霸","霸占","顧門","顧厝","響應","齧","囉","囊","彎","歡喜","囉嗦","癮","籠","糴","癮仙哥","讀死冊","糴米","聽候","聽喙","聽著","贖","顫","齪","齪嘈","纓纏","躘","鑢","顯","驗血","驗屍","變面","變鬼","變鬼變怪","躘被","變款","變猴弄","顯聖","變竅","讖","讓手","齴牙","齆鼻","糶","糶米","跕","躡","躡跤尾","鑽耳","鑿","鑿目","鬱","揻","贌","贌田","蹽","蹽溪仔","蹽落去","㧎","㧣","㨂","㨨","𠯗","𠯗一下","㤉","㤉潲","䀐","㾀","㧻","𢯾","𠞩","𠞭","㔂","㔂甘蔗","𨂿","𨂿著","𢼌","𩑾","𩛩","𩛩肥","䖙","𤆬","𤆬路","𤆬頭","𨑨迌","𥍉","𥍉𥍉看","𥍉目","𤉙","㨑","𧮙","𪐞","𤲍","𤲍燒","𦊓","㴘","㴘麵","㧒","䠡","㧌","𣁳","㨻","㨻魚","𧿳𧿳跳","必","必叉","必開","㴙㴙落","㴙㴙滴","𧿬","𧿬踏","𢪱","㽎","𨂾","𫝏","𫝏水","𫝏鋼","𫝺","𫞼","𫝻","𫟂","霧水","𬦰","𬦰山","𬦰樓梯","必巡","交仗","伨頭","牢鼎","佮股","定親","明品","怨天怨地","挕捒","活魚栽","倒退攄","捀場","起帆","起磅","起戇","鬥榫","徛台","捽繩","淋碭","尊存","掣一趒","插濫","欹空","湢","發輦","睏一醒","睏中晝","開聲","剾痧","搝票","照電光","落人","落台","過暝","嘔酸水","對相","滾耍笑","滾絞疼","漚肥","認命","踅頭","樂跎","餒志","擔罪","貓貓相","趨冰","攑頭香","鬱拗","𫝏錢","入心","大翻頭","反口供","反形","反種","引𤆬","手賤","歹紡","仝心","半遂","去倒","央倩","央教","交落枕","交繃","安太歲","成格","收煞","自作自
專","血崩山","行跤花","囥話","坉錢","坐毋著","坐底","妝娗","牢腹","育𤆬","走無路","走腹","走標","走學","走縒","咇噗跳","宕","定貨","往診","抹烏","抾肉幼仔","拆白講","拍火","拍速","放目","放伴","放紙虎","放銃","狗鯊","直目","空思夢想","金金相","長志","𤆬領","咬舌","品並","客滿","硩墓紙","挑花刺繡","相伨","相揣","看口","看命","食風","食茶","食罪","食認","食餐廳","食聲","倒轉去","倚蹛","凌勒","哽胿","挨磨","挽筋","捙倒","消敨","病囝","破豆","破鼎","笑微微","納錢","袂拄好","袂磕得","起豹飆","迵海","退流","退時","㨂甲","做水","做囮","做竅","寄罪","寄跤","徙岫","捾定","推捒","敗馬","窒車","透濫","備辦","喋詳","報戶口","提囡仔","敨放","智覺","湠根","發爐","硩注","筅黗","翕汗","著獎","著觸","覕鬚","貿貨底","跋感情","跙流籠","閒煩","催油","幌頭","搭胸坎","摃球","摃鼓","會毋著","會得過","溜旋","煞煞去","禁尿","落山","落貨","落漆","哭賴","話仙","頓蹬","𠞭花","嗺","對重","對頭誤","摔大眠","蓋蠟","認捌","撨徙","練仙","諒情","閬港","駐水","燃水","褪毛","賴賴趖","踮沬","點拄","蹛院","轉錢空","顧暝","變弄","變無魍","鑢卡","鑽水沬","隨人食","做肉餅","相𤆬走","食飽","做伴","唸歌","堅心","著愛","袂爽","大細聲","伸輪","滇流","講耍笑","騙痟的","譬論","失氣","交葛","相放伴","乞龜","滾躘","轉來去","拍莓","消蝕","搜揣","經布","合仔趁","著囝甘","輪火鬮","傍官靠勢","掩來扯去","徛馬勢","起番","放外外","上童","號令","出代誌","上陸","止飢","摒盪","敗欉","放雨白","相疼痛","食死死","出來去","爍爍顫","激頭腦","變撚","辟邪","百歲年老","講通和","合味","無命","出日","盤山過嶺","僭權","相袚","奸臣仔笑","坐罪","苦無","倒匼","激皮皮","激槌槌","欲死","徛騰騰","起去"]
PypiClean
/CAuthomatic-0.1.5.tar.gz/CAuthomatic-0.1.5/authomatic/extras/gae/openid.py
# We need absolute import to import from openid library which has the same # name as this module from __future__ import absolute_import import logging import datetime from google.appengine.ext import ndb import openid.store.interface class NDBOpenIDStore(ndb.Expando, openid.store.interface.OpenIDStore): """ |gae| `NDB <https://developers.google.com/appengine/docs/python/ndb/>`_ based implementation of the :class:`openid.store.interface.OpenIDStore` interface of the `python-openid`_ library. """ serialized = ndb.StringProperty() expiration_date = ndb.DateTimeProperty() # we need issued to sort by most recently issued issued = ndb.IntegerProperty() @staticmethod def _log(*args, **kwargs): pass @classmethod def storeAssociation(cls, server_url, association): # store an entity with key = server_url issued = datetime.datetime.fromtimestamp(association.issued) lifetime = datetime.timedelta(0, association.lifetime) expiration_date = issued + lifetime entity = cls.get_or_insert( association.handle, parent=ndb.Key( 'ServerUrl', server_url)) entity.serialized = association.serialize() entity.expiration_date = expiration_date entity.issued = association.issued cls._log( logging.DEBUG, u'NDBOpenIDStore: Putting OpenID association to datastore.') entity.put() @classmethod def cleanupAssociations(cls): # query for all expired cls._log( logging.DEBUG, u'NDBOpenIDStore: Querying datastore for OpenID associations.') query = cls.query(cls.expiration_date <= datetime.datetime.now()) # fetch keys only expired = query.fetch(keys_only=True) # delete all expired cls._log( logging.DEBUG, u'NDBOpenIDStore: Deleting expired OpenID associations from datastore.') ndb.delete_multi(expired) return len(expired) @classmethod def getAssociation(cls, server_url, handle=None): cls.cleanupAssociations() if handle: key = ndb.Key('ServerUrl', server_url, cls, handle) cls._log( logging.DEBUG, u'NDBOpenIDStore: Getting OpenID association from datastore by key.') entity = key.get() else: # return most 
recently issued association cls._log( logging.DEBUG, u'NDBOpenIDStore: Querying datastore for OpenID associations by ancestor.') entity = cls.query(ancestor=ndb.Key( 'ServerUrl', server_url)).order(-cls.issued).get() if entity and entity.serialized: return openid.association.Association.deserialize( entity.serialized) @classmethod def removeAssociation(cls, server_url, handle): key = ndb.Key('ServerUrl', server_url, cls, handle) cls._log( logging.DEBUG, u'NDBOpenIDStore: Getting OpenID association from datastore by key.') if key.get(): cls._log( logging.DEBUG, u'NDBOpenIDStore: Deleting OpenID association from datastore.') key.delete() return True @classmethod def useNonce(cls, server_url, timestamp, salt): # check whether there is already an entity with the same ancestor path # in the datastore key = ndb.Key( 'ServerUrl', str(server_url) or 'x', 'TimeStamp', str(timestamp), cls, str(salt)) cls._log( logging.DEBUG, u'NDBOpenIDStore: Getting OpenID nonce from datastore by key.') result = key.get() if result: # if so, the nonce is not valid so return False cls._log( logging.WARNING, u'NDBOpenIDStore: Nonce was already used!') return False else: # if not, store the key to datastore and return True nonce = cls(key=key) nonce.expiration_date = datetime.datetime.fromtimestamp( timestamp) + datetime.timedelta(0, openid.store.nonce.SKEW) cls._log( logging.DEBUG, u'NDBOpenIDStore: Putting new nonce to datastore.') nonce.put() return True @classmethod def cleanupNonces(cls): # get all expired nonces cls._log( logging.DEBUG, u'NDBOpenIDStore: Querying datastore for OpenID nonces ordered by expiration date.') expired = cls.query().filter( cls.expiration_date <= datetime.datetime.now()).fetch( keys_only=True) # delete all expired cls._log( logging.DEBUG, u'NDBOpenIDStore: Deleting expired OpenID nonces from datastore.') ndb.delete_multi(expired) return len(expired)
PypiClean
/DSRE-0.2.tar.gz/DSRE-0.2/HATT/hatt/data_loader/data_loader.py
import torch
import random
import torch.utils.data as data
import numpy as np
import pandas as pd
from collections import Counter
import sklearn.metrics
import utils

# Inverse-frequency weighting below can divide by zero for relations that
# never occur; suppress the numpy warnings for that case.
np.seterr(divide='ignore', invalid='ignore')


class BagREDataset(data.Dataset):
    """
    Bag-level relation extraction dataset. Note that relation of NA should be
    named as 'NA'.
    """

    def __init__(self, path, rel2id, tokenizer, entpair_as_bag=False,
                 bag_size=0, mode=None):
        """
        Args:
            path: path of the input file
            rel2id: dictionary of relation->id mapping
            tokenizer: function of tokenizing
            entpair_as_bag: if True, bags are constructed based on same
                entity pairs instead of same relation facts (ignoring
                relation labels)
            bag_size: bag size
            mode: training model. Defaults to multi-instance (bag) training
        """
        super().__init__()
        self.tokenizer = tokenizer
        self.rel2id = rel2id
        self.entpair_as_bag = entpair_as_bag
        self.bag_size = bag_size
        if "NYT-10" in path:
            self.data = pd.read_json(path, encoding='utf8')
            self.data = self.data.to_dict('records')
            # Construct bag-level dataset
            if mode is None:
                self.weight = np.zeros((len(self.rel2id)), dtype=np.float32)
                self.bag_scope = []
                self.name2id = {}
                self.bag_name = []
                self.facts = {}
                for idx, item in enumerate(self.data):
                    rel_fact = (item['h_id'], item['t_id'], item['relation'])
                    if item['relation'] != 'NA':
                        self.facts[rel_fact] = 1
                    if entpair_as_bag:
                        name = (item['h_id'], item['t_id'])
                    else:
                        name = rel_fact
                    if name not in self.name2id:
                        self.name2id[name] = len(self.name2id)
                        self.bag_scope.append([])
                        self.bag_name.append(name)
                    self.bag_scope[self.name2id[name]].append(idx)
                    self.weight[self.rel2id[item['relation']]] += 1.0
                # Softened inverse-frequency class weights.
                self.weight = np.float32(1.0 / (self.weight ** 0.05))
                self.weight = torch.from_numpy(self.weight)
            else:
                pass
        elif "GDS" in path:
            self.data = pd.read_csv(path, sep='\t', encoding='utf-8')
            self.data = self.data.to_dict('records')
            # Construct bag-level dataset
            if mode is None:
                self.weight = np.zeros((len(self.rel2id)), dtype=np.float32)
                self.bag_scope = []
                self.name2id = {}
                self.bag_name = []
                self.facts = {}
                # NOTE(review): unlike the NYT-10 branch, self.facts is never
                # populated here (the line was commented out upstream), which
                # makes eval() divide by zero on this corpus — confirm intent.
                for idx, item in enumerate(self.data):
                    rel_fact = (item['h_FB_ID'], item['t_FB_ID'],
                                item['relation'])
                    if item['relation'] != "no_relation":
                        # self.facts[rel_fact] = 1
                        if entpair_as_bag:
                            name = (item['h_FB_ID'], item['t_FB_ID'])
                        else:
                            name = rel_fact
                        if name not in self.name2id:
                            self.name2id[name] = len(self.name2id)
                            self.bag_scope.append([])
                            self.bag_name.append(name)
                        self.bag_scope[self.name2id[name]].append(idx)
                        self.weight[self.rel2id[item['relation']]] += 1.0
                self.weight = np.float32(1.0 / (self.weight ** 0.05))
                self.weight = torch.from_numpy(self.weight)
            else:
                pass

    def __len__(self):
        # One item per bag, not per sentence.
        return len(self.bag_scope)

    def __getitem__(self, index):
        """Return [rel_id, bag_name, bag_len] + per-field stacked tensors."""
        bag = self.bag_scope[index]
        if self.bag_size > 0:
            # Down-sample or up-sample (with replacement) to a fixed bag size.
            if self.bag_size <= len(bag):
                resize_bag = random.sample(bag, self.bag_size)
            else:
                resize_bag = bag + list(
                    np.random.choice(bag, self.bag_size - len(bag)))
            bag = resize_bag

        seqs = None
        rel = self.rel2id[self.data[bag[0]]['relation']]
        for sent_id in bag:
            item = self.data[sent_id]
            seq = list(self.tokenizer(item))
            if seqs is None:
                # One bucket per field produced by the tokenizer.
                seqs = []
                for i in range(len(seq)):
                    seqs.append([])
            for i in range(len(seq)):
                seqs[i].append(seq[i])
        for i in range(len(seqs)):
            seqs[i] = torch.cat(seqs[i], 0)  # (bag_size, L)
        return [rel, self.bag_name[index], len(bag)] + seqs

    # Used as a plain (unbound) collate function via BagREDataset.collate_fn;
    # `data` here is the batch, not `self`.
    def collate_fn(data):
        """Collate for variable-size bags (bag_size == 0)."""
        data = list(zip(*data))
        label, bag_name, count = data[:3]
        seqs = data[3:]
        for i in range(len(seqs)):
            seqs[i] = torch.cat(seqs[i], 0)  # (sumn, L)
            # NOTE(review): replicates the batch per CUDA device; on a
            # CPU-only machine device_count() is 0 — confirm intent.
            seqs[i] = seqs[i].expand(
                (torch.cuda.device_count(),) + seqs[i].size())
        scope = []  # (start, end) index range of each bag in the flat batch
        start = 0
        for c in count:
            scope.append((start, start + c))
            start += c
        assert (start == seqs[0].size(1))
        scope = torch.tensor(scope).long()
        label = torch.tensor(label).long()
        return [label, bag_name, scope] + seqs

    # Also used as a plain (unbound) collate function (fixed bag size).
    def collate_bag_size_fn(data):
        """Collate for fixed-size bags (bag_size > 0)."""
        data = list(zip(*data))
        label, bag_name, count = data[:3]
        seqs = data[3:]
        for i in range(len(seqs)):
            seqs[i] = torch.stack(seqs[i], 0)  # (batch, bag, L)
        scope = []  # (start, end) per bag; kept as a plain list here
        start = 0
        for c in count:
            scope.append((start, start + c))
            start += c
        label = torch.tensor(label).long()
        return [label, bag_name, scope] + seqs

    def eval(self, pred_result, model_name, save_eval_metrics=False):
        """
        Args:
            pred_result: a list with dict {'entpair': (head_id, tail_id),
                'relation': rel, 'score': score}. Note that relation of NA
                should be excluded.
            model_name: name of the model
            save_eval_metrics: declares whether to store evaluation metrics
                or not
        Return:
            {'prec': narray[...], 'rec': narray[...], 'auc': xx,
             'p@all': xx, 'p@100': xx, 'p@200': xx, 'p@300': xx, 'p@500': xx,
             'p@1000': xx, 'p@2000': xx, 'rel_dist_at_300': dict{...},
             'relfacts': dict{...}, 'sorted_pred_results': narray[...],
             'rel_pos_dist_at_300': dict{...}}
            prec (precision) and rec (recall) are in micro style, sorted in
            the decreasing order of the score.
        """
        sorted_pred_result = sorted(
            pred_result, key=lambda x: x['score'], reverse=True)
        prec = []
        rec = []
        correct = 0
        # NOTE(review): total == 0 when self.facts is empty (GDS) and then
        # rec computation raises ZeroDivisionError — confirm upstream.
        total = len(self.facts)
        for i, item in enumerate(sorted_pred_result):
            if (item['entpair'][0], item['entpair'][1],
                    item['relation']) in self.facts:
                correct += 1
            prec.append(float(correct) / float(i + 1))
            rec.append(float(correct) / float(total))
        auc = np.around(sklearn.metrics.auc(x=rec, y=prec), 4)
        np_prec = np.array(prec)
        np_rec = np.array(rec)

        def prec_at_n(n):
            # Precision among the n highest-scoring predictions.
            correct = 0
            for i, item in enumerate(sorted_pred_result[:n]):
                if (item['entpair'][0], item['entpair'][1],
                        item['relation']) in self.facts:
                    correct += 1
            return (correct / n)

        prec_at_all = prec_at_n(len(sorted_pred_result))
        prec_at_100 = prec_at_n(100)
        prec_at_200 = prec_at_n(200)
        prec_at_300 = prec_at_n(300)
        prec_at_500 = prec_at_n(500)
        prec_at_1000 = prec_at_n(1000)
        prec_at_2000 = prec_at_n(2000)

        rel_at_300 = [x['relation'] for x in sorted_pred_result[0:300]]
        rel_dist_at_300 = dict(Counter(rel_at_300))
        rel_pos_dist_at_300 = dict(Counter(
            [x['relation'] for x in sorted_pred_result[0:300]
             if x['score'] > 0.5]))

        # Return the eval metrics
        if save_eval_metrics:
            print("Saving eval metrics for testing set")
            utils.plot_precision_recall_curve(np_prec, np_rec, auc, model_name)
            utils.save_precision_recall_values(np_prec, np_rec, model_name)
            utils.save_eval_metrics(prec_at_100, prec_at_200, prec_at_300,
                                    prec_at_500, prec_at_1000, prec_at_2000,
                                    prec_at_all, auc, model_name)
            utils.save_labels_distribution_at_top_300_predictions(
                rel_dist_at_300, model_name)
            utils.save_relational_facts(self.facts, model_name)
            utils.save_sorted_pred_results(sorted_pred_result, model_name)

        return {'prec': np_prec, 'rec': np_rec, 'auc': auc,
                'p@all': prec_at_all, 'p@100': prec_at_100,
                'p@200': prec_at_200, 'p@300': prec_at_300,
                'p@500': prec_at_500, 'p@1000': prec_at_1000,
                'p@2000': prec_at_2000, 'rel_dist_at_300': rel_dist_at_300,
                'relfacts': self.facts,
                'sorted_pred_results': sorted_pred_result,
                'rel_pos_dist_at_300': rel_pos_dist_at_300}


def BagRELoader(path, rel2id, tokenizer, batch_size, shuffle,
                entpair_as_bag=False, bag_size=0, num_workers=0,
                collate_fn=BagREDataset.collate_fn):
    """Build a DataLoader over a BagREDataset.

    NOTE(review): the `collate_fn` argument is always overridden below based
    on `bag_size`, so any caller-supplied value is ignored — confirm whether
    that is intended before changing it.
    """
    if bag_size == 0:
        collate_fn = BagREDataset.collate_fn
    else:
        collate_fn = BagREDataset.collate_bag_size_fn
    dataset = BagREDataset(path, rel2id, tokenizer,
                           entpair_as_bag=entpair_as_bag, bag_size=bag_size)
    data_loader = data.DataLoader(dataset=dataset,
                                  batch_size=batch_size,
                                  shuffle=shuffle,
                                  pin_memory=True,
                                  num_workers=num_workers,
                                  collate_fn=collate_fn)
    return data_loader
PypiClean
/CmonCrawl-1.0.3.tar.gz/CmonCrawl-1.0.3/cmoncrawl/middleware/stompware.py
import asyncio
import json
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import Dict, List, Set, Tuple

from stomp import Connection, ConnectionListener
from stomp.utils import Frame
from stomp.exception import StompException

from cmoncrawl.aggregator.index_query import IndexAggregator
from cmoncrawl.aggregator.utils.helpers import unify_url_id
from cmoncrawl.common.loggers import all_purpose_logger
from cmoncrawl.common.types import DomainRecord
from cmoncrawl.processor.pipeline.pipeline import ProcessorPipeline

# Header Artemis uses for duplicate detection.
DUPL_ID_HEADER = "_AMQ_DUPL_ID"


@dataclass
class Message:
    # Parsed domain record plus the raw STOMP headers (kept so the message
    # can later be ack'd/nack'd by message-id and subscription).
    dr: DomainRecord
    headers: Dict[str, str]


@dataclass
class ListnerStats:
    messages: int = 0
    # default_factory so each instance gets its own "now" at creation time;
    # a plain `= datetime.now()` default is evaluated once at import and
    # shared by every instance, which breaks the idle-timeout logic.
    last_message_time: datetime = field(default_factory=datetime.now)


class ArtemisAggregator:
    """
    Aggregator that listens queries the common crawl index and sends the
    results to a queue using the stomp protocol.

    It the creates a queue with name `queue.{url}` and sends the results to
    it. It also creates a topic with name `topic.poisson_pill.{url}` and
    sends a message with type `poisson_pill` to it when it finishes.

    Args:
        queue_host (str): The host of the queue
        queue_port (int): The port of the queue
        url (str): The url of the queue
        index_agg (IndexAggregator): The index aggregator
        heartbeat (int, optional): The heartbeat of the connection.
            Defaults to 10000.
    """

    def __init__(
        self,
        queue_host: str,
        queue_port: int,
        url: str,
        index_agg: IndexAggregator,
        heartbeat: int = 10000,
    ):
        self.queue_host = queue_host
        self.queue_port = queue_port
        self.index_agg = index_agg
        self.url = url
        self.heartbeat = heartbeat

    def _init_connection(self):
        # Blocking connect as the "producer" user; returns a live connection.
        conn = Connection(
            [(self.queue_host, self.queue_port)],
            heartbeats=(self.heartbeat, self.heartbeat),
        )
        conn.connect(login="producer", passcode="producer", wait=True)  # type: ignore
        all_purpose_logger.info(f"Connected to queue")
        return conn

    async def aggregate(self, filter_duplicates: bool = True):
        """
        Aggregates the results of the index aggregator and sends them to the
        queue.

        If `filter_duplicates` is True, it will use the `DUPL_ID_HEADER`
        header, which Artemis uses to filter duplicates.
        """
        # Retry the initial connection forever, backing off 5s on failure.
        while True:
            try:
                conn = self._init_connection()
                break
            except StompException as e:
                all_purpose_logger.error(e, exc_info=True)
                await asyncio.sleep(5)
                continue

        i = 0
        async with self.index_agg as aggregator:
            async for domain_record in aggregator:
                try:
                    # Reconnect transparently if the broker dropped us.
                    while not conn.is_connected():
                        conn = self._init_connection()
                    json_str = json.dumps(domain_record.__dict__, default=str)
                    headers = {}
                    id = unify_url_id(domain_record.url or "")
                    if filter_duplicates:
                        headers[DUPL_ID_HEADER] = id
                    conn.send(  # type: ignore
                        f"queue.{self.url}",
                        json_str,
                        headers=headers,
                    )
                    all_purpose_logger.debug(
                        f"Sent url: {domain_record.url} with id: {id}"
                    )
                    i += 1
                except (StompException, OSError) as e:
                    # Transport-level failures: log and move to next record.
                    all_purpose_logger.error(e, exc_info=True)
                    continue
                except Exception as e:
                    # Anything else is unexpected: log and stop aggregating.
                    all_purpose_logger.error(e, exc_info=True)
                    break

        all_purpose_logger.info(f"Sent {i} messages")
        # Signal completion to all consumers.
        conn.send(  # type: ignore
            f"topic.poisson_pill.{self.url}", "", headers={"type": "poisson_pill"}
        )
        conn.disconnect()  # type: ignore


class ArtemisProcessor:
    """
    Processor that listens to a queues and processes the messages using a
    pipeline.

    When it receives a message with type enough `poisson_pill` messages, it
    will stop listening if it doesn't receive any messages for `timeout`
    minutes.

    Args:
        queue_host (str): The host of the queue
        queue_port (int): The port of the queue
        pills_to_die (int, optional): The number of `poisson_pill` messages
            to receive before dying. Defaults to None.
        queue_size (int): The size of the queue
        timeout (int): The timeout in minutes
        addresses (List[str]): The addresses of the queues
        pipeline (ProcessorPipeline): The pipeline to use for processing
        heartbeat (int, optional): The heartbeat of the connection.
            Defaults to 10000.
    """

    def __init__(
        self,
        queue_host: str,
        queue_port: int,
        pills_to_die: int | None,
        queue_size: int,
        timeout: int,
        addresses: List[str],
        pipeline: ProcessorPipeline,
        heartbeat: int = 10000,
    ):
        self.queue_host = queue_host
        self.queue_port = queue_port
        self.pills_to_die = pills_to_die
        self.queue_size = queue_size
        self.timeout = timeout
        self.pipeline = pipeline
        self.addresses = addresses
        self.heartbeat = heartbeat

    class Listener(ConnectionListener):
        """STOMP listener that parses frames into Message objects."""

        def __init__(
            self,
            messages: asyncio.Queue[Message],
            listener_stats: ListnerStats,
        ):
            self.messages = messages
            self.pills = 0
            self.listener_stats = listener_stats

        def on_message(self, frame: Frame):
            if frame.headers.get("type") == "poisson_pill":  # type: ignore
                self.pills += 1
            else:
                msg_json = json.loads(frame.body)  # type: ignore
                try:
                    msg_json["timestamp"] = datetime.fromisoformat(
                        msg_json.get("timestamp")
                    )
                    domain_record = DomainRecord(**msg_json)
                    self.messages.put_nowait(Message(domain_record, frame.headers))  # type: ignore
                    self.listener_stats.messages += 1
                    self.listener_stats.last_message_time = datetime.now()
                except ValueError:
                    # Malformed timestamp/record: drop the frame silently.
                    pass

    def _init_connection(self, addresses: List[str]):
        # Connect as "consumer", subscribe to all work queues with
        # per-message acks and to the poison-pill topic with auto-ack.
        conn = Connection(
            [(self.queue_host, self.queue_port)],
            reconnect_attempts_max=-1,
            heartbeats=(self.heartbeat, self.heartbeat),
        )
        conn.connect(login="consumer", passcode="consumer", wait=True)  # type: ignore
        for address in addresses:
            conn.subscribe(address, id=address, ack="client-individual")  # type: ignore
        conn.subscribe("topic.poisson_pill.#", id="poisson_pill", ack="auto")  # type: ignore
        listener_stats = ListnerStats()
        listener = self.Listener(asyncio.Queue(0), listener_stats)
        conn.set_listener("", listener)  # type: ignore
        all_purpose_logger.info("Connected to queue")
        return conn, listener

    async def _call_pipeline_with_ack(
        self,
        pipeline: ProcessorPipeline,
        msg: Message,
        client: Connection,
    ):
        # Make sure no exception is thrown from this function
        # So that we can nack it if needed
        paths = []
        try:
            paths = await pipeline.process_domain_record(msg.dr, {})
            # Ack at any result
            client.ack(msg.headers.get("message-id"), msg.headers.get("subscription"))
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except Exception as e:
            client.nack(msg.headers.get("message-id"), msg.headers.get("subscription"))
            all_purpose_logger.error(f"Error in pipeline: {e}", exc_info=True)
        return (msg, paths)

    async def process(self):
        """Consume messages until enough pills arrive and the queue idles out."""
        timeout_delta = timedelta(minutes=self.timeout)
        # Set's extractor path based on config
        pending_extracts: Set[asyncio.Task[Tuple[Message, List[str]]]] = set()

        # Retry the initial connection forever, backing off 5s on failure.
        while True:
            try:
                conn, listener = self._init_connection(self.addresses)
                break
            except StompException as e:
                all_purpose_logger.error(e, exc_info=True)
                await asyncio.sleep(5)
                continue
        all_purpose_logger.debug("Connecting to queue")

        extracted_num = 0
        try:
            if hasattr(self.pipeline.downloader, "__aenter__"):
                await self.pipeline.downloader.__aenter__()  # type: ignore

            while True:
                # Exit once the queue is drained, enough pills were received
                # and no message arrived within the timeout window.
                if (
                    listener.messages.empty()
                    and (
                        self.pills_to_die is None
                        or listener.pills >= self.pills_to_die
                    )
                    and datetime.now() - listener.listener_stats.last_message_time
                    >= timeout_delta
                ):
                    all_purpose_logger.info(
                        f"No new messages in {self.timeout} minutes, exiting"
                    )
                    break
                try:
                    # Auto reconnect if queue disconnects
                    if not conn.is_connected():
                        conn, listener = self._init_connection(self.addresses)

                    if len(pending_extracts) > 0:
                        done, pending_extracts = await asyncio.wait(
                            pending_extracts, return_when="FIRST_COMPLETED"
                        )
                        for task in done:
                            message, paths = task.result()
                            if len(paths) > 0:
                                for path in paths:
                                    all_purpose_logger.info(
                                        f"Downloaded {message.dr.url} to {path}"
                                    )
                                extracted_num += 1
                            else:
                                all_purpose_logger.info(
                                    f"Failed to extract {message.dr.url}"
                                )
                    # Keep up to queue_size extractions in flight.
                    while (
                        len(pending_extracts) < self.queue_size
                        and not listener.messages.empty()
                    ):
                        pending_extracts.add(
                            asyncio.create_task(
                                self._call_pipeline_with_ack(
                                    self.pipeline, listener.messages.get_nowait(), conn
                                )
                            )
                        )
                except StompException as e:
                    all_purpose_logger.error(e, exc_info=True)
                    continue
                except Exception as e:
                    all_purpose_logger.error(e, exc_info=True)
                    break

            # Process remaining stuff in queue
            gathered = await asyncio.gather(*pending_extracts, return_exceptions=True)
            for task in gathered:
                if isinstance(task, Exception):
                    continue
                message, paths = task
                if len(paths) > 0:
                    for path in paths:
                        all_purpose_logger.info(
                            f"Downloaded {message.dr.url} to {path}"
                        )
                    extracted_num += 1
                else:
                    all_purpose_logger.info(f"Failed to extract {message.dr.url}")
        except Exception as e:
            # Best-effort shutdown path; previously swallowed silently —
            # at least record what went wrong before cleaning up.
            all_purpose_logger.error(e, exc_info=True)
        finally:
            if hasattr(self.pipeline.downloader, "__aexit__"):
                await self.pipeline.downloader.__aexit__(None, None, None)  # type: ignore

        all_purpose_logger.info(
            f"Extracted {extracted_num}/{listener.listener_stats.messages} articles"
        )
        conn.disconnect()  # type: ignore
PypiClean
/BIDSHandler-0.2.1-py3-none-any.whl/bidshandler/session.py
import os
import os.path as op
from collections import OrderedDict
import xml.etree.ElementTree as ET

import pandas as pd

from .utils import _get_bids_params, _copyfiles, _realize_paths, _combine_tsv
from .bidserrors import MappingError, NoScanError, AssociationError
from .scan import Scan
from .querymixin import QueryMixin

# Extensions considered raw recordings when no scans.tsv is present.
_RAW_FILETYPES = ('.nii', '.bdf', '.con', '.sqd')    # TODO: add more...


class Session(QueryMixin):
    """Session-level object.

    Parameters
    ----------
    id_ : str
        Id of the session. This is the sequence of characters after `'ses-'`.
    subject : :class:`bidshandler.Subject`
        Parent Subject object containing this Session.
    initialize : bool, optional
        Whether to parse the folder and load any child structures.
    no_folder : bool, optional
        Whether or not the session is contained within a `ses-XX` folder.
        For experiments with multiple sessions each folder will correspond to
        a Session object, however if there is only a single session this can
        be omitted and the Subject folder is in fact the Session folder.
    """

    def __init__(self, id_, subject, initialize=True, no_folder=False):
        super(Session, self).__init__()
        self._id = id_
        self.subject = subject
        self._scans_tsv = None
        self._scans = []
        self.recording_types = []
        self._queryable_types = ('session', 'scan')

        self.has_no_folder = no_folder

        if initialize:
            self._add_scans()
            self._check()

#region public methods

    def add(self, other, copier=_copyfiles):
        """.. # noqa

        Add another Scan or Session to this object.

        Parameters
        ----------
        other : Instance of :class:`bidshandler.Scan` or :class:`bidshandler.Session`
            Object to be added to this Session.
            The added object must already exist in the same context as this
            object.
        copier : function, optional
            A function to facilitate the copying of any applicable data.
            This function must have the call signature
            `function(src_files: list, dst_files: list)`
            Where src_files is the list of files to be moved and dst_files is
            the list of corresponding destinations.
            This will default to using utils._copyfiles which simply
            implements :py:func:`shutil.copy` and creates any directories
            that do not already exist.
        """
        if isinstance(other, Session):
            # Merging a whole session means merging each of its scans.
            if self._id == other._id:
                for scan in other.scans:
                    self.add(scan, copier)
            else:
                raise ValueError("Added session must have same ID.")
        elif isinstance(other, Scan):
            # TODO-LT: handle other modalities
            # We need to make sure that the scan is of the same
            # person/session:
            if (self._id == other.session._id and
                    self.subject._id == other.subject._id and
                    self.project._id == other.project._id):
                # Handle merging the scans.tsv file.
                if other in self:
                    # We don't want to add it if it is already in this
                    # session.
                    # TODO: add overwrite argument to allow it to still be
                    # added.
                    return
                other_scan_df = pd.DataFrame(
                    OrderedDict([
                        ('filename', [other.raw_file_relative]),
                        ('acq_time', [other.acq_time])]),
                    columns=['filename', 'acq_time'])
                # Combine the new data into the original tsv.
                _combine_tsv(self.scans_tsv, other_scan_df, 'filename')

                # Assign as a set to avoid any potential doubling of the raw
                # file path.
                files = set(other.associated_files.values())
                files.add(other._sidecar)
                files.add(other._raw_file)
                # Copy the files over.
                fl_left = _realize_paths(other, files)
                fl_right = []
                for fpath in files:
                    fl_right.append(op.join(self.path, other._path, fpath))
                copier(fl_left, fl_right)
                # Add the scan object to our scans list.
                scan = Scan(other.raw_file_relative, self,
                            acq_time=other.acq_time)
                self._scans.append(scan)
            else:
                raise AssociationError("scan", "project, subject and session")
        else:
            raise TypeError("Cannot add a {0} object to a Subject".format(
                type(other).__name__))

    def contained_files(self):
        """Get the list of contained files.

        Returns
        -------
        file_list : set
            Set of paths to all contained files relating to the BIDS
            structure.
        """
        file_list = set()
        file_list.add(_realize_paths(self, self._scans_tsv))
        for scan in self.scans:
            file_list.update(scan.contained_files())
        return file_list

    def scan(self, task=None, acq=None, run=None):
        # TODO: Allow this to return a list if multiple scans match.
        # Consider None a wildcard.
        """Return the contained Scan corresponding to the provided values.

        Parameters
        ----------
        task : str
            Value of `task` in the BIDS filename.
        acq : str
            Value of `acq` in the BIDS filename.
        run : str
            Value of `run` in the BIDS filename.

        Returns
        -------
        scan : :class:`bidshandler.Scan`
            Scan object.
        """
        for scan in self.scans:
            if (scan.task == task and scan.acq == acq and scan.run == run):
                return scan
        raise NoScanError

#region private methods

    def _add_scans(self):
        """Parse the session folder to find what recordings are included."""
        for fname in os.listdir(self.path):
            full_path = op.join(self.path, fname)
            # Each sub-directory is considered a separate type of recording.
            if op.isdir(full_path):
                self.recording_types.append(fname)
            # The only other non-folder should be the scans tsv.
            else:
                filename_data = _get_bids_params(fname)
                if filename_data.get('file', None) == 'scans':
                    # Store the path and extract the paths of the scans.
                    self._scans_tsv = fname
                    scans = pd.read_csv(_realize_paths(self, self._scans_tsv),
                                        sep='\t')
                    column_names = set(scans.columns.values)
                    if 'filename' not in column_names:
                        raise MappingError(
                            "{0} contains no 'filename' column".format(
                                self.scans_tsv))
                    column_names.remove('filename')
                    for i in range(len(scans)):
                        row = scans.iloc[i]
                        fname = row.pop('filename')
                        self._scans.append(
                            Scan(fname, self, **dict(row)))
        # if we haven't found a scans.tsv file then we need to add all the
        # scans in a different way.
        if self._scans_tsv is None:
            # for now do just MRI stuff which is any .nii.gz file I think?
            #TODO: have a switch for each folder name?
            for rec_type in self.recording_types:
                if rec_type not in ('anat', 'dwi'):
                    rec_path = _realize_paths(self, rec_type)
                    for fname in os.listdir(rec_path):
                        if rec_type == 'fmap':
                            # fieldmap sequence
                            # The files with `file` = `magnitude1` are not raw
                            # scans.
                            # NOTE(review): in the original source this check
                            # ran *before* the listdir loop and so used a
                            # stale `fname` from an earlier loop; it has been
                            # moved inside the loop so every fieldmap file is
                            # checked -- confirm against upstream intent.
                            filename_data = _get_bids_params(fname)
                            if ((filename_data['file'] not in
                                    ('magnitude1', 'magnitude2')) and
                                    'nii' in fname):
                                self._scans.append(
                                    Scan(op.join(rec_type, fname), self))
                        else:
                            for ext in _RAW_FILETYPES:
                                if ext in fname:
                                    self._scans.append(
                                        Scan(op.join(rec_type, fname), self))

    def _check(self):
        """Check that there is at least one included scan."""
        if len(self._scans) == 0:
            raise MappingError("No scans found in {0}/{1}/{2}.".format(
                self.project.ID, self.subject.ID, self.ID))

    @staticmethod
    def _clone_into_subject(subject, other):
        """Create a copy of the Session with a new parent Subject.

        Parameters
        ----------
        subject : :class:`bidshandler.Subject`
            New parent Subject.
        other : :class:`BIDSHandler.Session`
            Original Session instance to clone.

        Returns
        -------
        new_session : :class:`bidshandler.Session`
            New uninitialized Session cloned from `other` to be a child of
            `subject`.
        """
        os.makedirs(_realize_paths(subject, other.ID), exist_ok=True)
        # Create a new empty session object.
        new_session = Session(other._id, subject, initialize=False)
        new_session._create_empty_scan_tsv()
        return new_session

    def _create_empty_scan_tsv(self):
        """Create an empty scans.tsv file for this session."""
        self._scans_tsv = '{0}_{1}_scans.tsv'.format(self.subject.ID, self.ID)
        full_path = _realize_paths(self, self._scans_tsv)
        if not op.exists(full_path):
            df = pd.DataFrame(OrderedDict([('filename', [])]),
                              columns=['filename'])
            df.to_csv(full_path, sep='\t', index=False, na_rep='n/a',
                      encoding='utf-8')

    def _generate_map(self):
        """Generate a map of the Session.

        Returns
        -------
        root : :py:class:`xml.etree.ElementTree.Element`
            Xml element containing session information.
        """
        root = ET.Element('Session', attrib={'ID': str(self._id)})
        for scan in self.scans:
            root.append(scan._generate_map())
        return root

#region properties

    @property
    def bids_tree(self):
        """Parent :class:`bidshandler.BIDSTree` object."""
        return self.project.bids_tree

    @property
    def ID(self):
        """ID with 'ses' prefix."""
        return 'ses-{0}'.format(self._id)

    @property
    def inheritable_files(self):
        """List of files that are able to be inherited by child objects."""
        files = self.subject.inheritable_files
        for fname in os.listdir(self.path):
            abs_path = _realize_paths(self, fname)
            if op.isfile(abs_path):
                files.append(abs_path)
        return files

    @property
    def path(self):
        """Determine path location based on parent paths."""
        if self.has_no_folder:
            return self.subject.path
        return op.join(self.subject.path, self.ID)

    @property
    def project(self):
        """Parent :class:`bidshandler.Project` object."""
        return self.subject.project

    @property
    def scans(self):
        """List of contained Scans."""
        return self._scans

    @property
    def scans_tsv(self):
        """Absolute path of associated scans.tsv file."""
        return _realize_paths(self, self._scans_tsv)

#region class methods

    def __contains__(self, other):
        """Determine whether the Session object contains a scan.

        Parameters
        ----------
        other : :class:`bidshandler.Scan`
            Object to test whether it is contained in this Session.

        Returns
        -------
        bool
            Returns True if the object is contained within this Session.
        """
        if isinstance(other, Scan):
            for scan in self._scans:
                if scan == other:
                    return True
            return False
        raise TypeError("Can only determine if a Scan is contained.")

    def __iter__(self):
        return iter(self._scans)

    def __repr__(self):
        return '<Session, ID: {0}, {1} scan{2}, @ {3}>'.format(
            self.ID,
            len(self.scans),
            ('s' if len(self.scans) > 1 else ''),
            self.path)

    def __str__(self):
        output = []
        output.append('ID: {0}'.format(self.ID))
        output.append('Number of scans: {0}'.format(len(self.scans)))
        return '\n'.join(output)
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/editor/README
------------------------------------------------------------------------------- dojox.editor ------------------------------------------------------------------------------- Version 0.9 Release date: 9/14/2009 ------------------------------------------------------------------------------- Project state: experimental, beta, stable ------------------------------------------------------------------------------- Credits Mike Wilcox - Author Jared Jurkiewicz - Author (PrettyPrint, PageBreak, ShowBlockNodes, Preview, Save, ToolbarLineBreak, InsertEntity, NormalizeIndentOutdent, Breadcrumb, FindReplace, CollapsibleToolbar, Blockquote, PasteFromWord, InsertAnchor, TextColor, NormalizeStyle, StatusBar, SafePaste) Dustin Machi - Technical Assistance David Schwartz and Gu Yi He (IBM) - Contributed enhancements to the look and feel of FindReplace, as well as behavioral improvements. Eldon (IBM, CCLA) - LocalImage, AutoUrlLink, TablePluginsColorCell - dojox.widget.ColorPicker, ResizeTableColumn, AutoSave, SpellCheck ------------------------------------------------------------------------------- Project description Space for extensions and additional plugins for dijit.Editor. The project currently contains the following plugins: dojox.editor.plugins.TablePlugins: Status: Experimental. The Table Plugins provide a mechanism for editing tables within the dijit.Editor. This plugin is experimental and does not work correctly in all dojo supported browsers. dojox.editor.plugins.UploadImage: Status: Experimental. The UploadImage plugin makes use of the dojox upload widgets to provide a mechanism to upload images to your server for use in the editor. dojox.editor.plugins.PrettyPrint: Status: Supported (stable). The PrettyPrint plugin provides a mechanism by which the output from editor.getValue()/editor.attr("value") is nicely formatted. 
Optional format parameters are how many spaces to indent by (default is tab), the maximum text line length (not including indent), and what characters in text strings should be encoded to their &<enc>; representation. dojox.editor.plugins.PageBreak: Status: Supported (stable). A simple plugin that allows you to insert 'page breaks' into the doc being edited. These page break styles will then cause the document to break to another page when printed. dojox.editor.plugins.ShowBlockNodes: Status: Supported (stable). A simple plugin that allows you to toggle on and off a CSS 'view' of how the page is laid out in terms of the block nodes used for its formatting. dojox.editor.plugins.Save: Status: Supported (beta). A simple plugin that allows you to POST the content of the editor back to a URL. dojox.editor.plugins.Preview: Status: Supported (beta). A simple plugin that allows you to display the content of the editor in a new window and apply a set of styles to it so you can see how content will look with various styles applied. It is likely this plugin will still evolve a bit. dojox.editor.plugins.ToolbarLineBreak: Status: Supported (stable). An extremely simple plugin that allows you to 'linebreak' the dijit toolbar so that really long toolbars for editor (lots of plugins enabled), can be broken up into multiple rows. dojox.editor.plugins.InsertEntity: Status: Experimental (unsupported). A plugin that enables the ability to insert HTML/XML entity characters into a page. These are often called 'symbols'. The set it provides are the basic latin (8859) set and a portion of greek symbols common to mathematics. It has been marked experimental as it is likely this plugin will evolve a bit. dojox.editor.plugins.NormalizeIndentOutdent: Status: Experimental (unsupported). A plugin that normalizes the behavior of indent/outdent to use margin styles instead of <blockquote> tags. Also fixes indent/outdent of lists to work properly. 
This is new and has been tested, but not extensively. Therefore it is currently classed as experimental. dojox.editor.plugins.Breadcrumb: Status: Experimental (unsupported). A plugin that adds a breadcrumb toolbar to the bottom of the editor. Useful for seeing where you are and what operations you can perform. This is new and has been tested, but not extensively. Therefore it is currently classed as experimental. dojox.editor.plugins.FindReplace: Status: Experimental (unsupported). A plugin that adds a togglable Find/Replace toolbar to the editor. Useful for searching and replacing text strings in the editor content. Only works on FF, IE, and WebKit. No Opera support. This is new and has been tested, but not extensively. Therefore it is currently classed as experimental. dojox.editor.plugins.CollapsibleToolbar: Status: Supported (Stable). A plugin that modifies the header node of the editor so that it is 'collapsible'. Meaning that it can be closed (reduced), and reopened. Useful for increasing editor real-estate. dojox.editor.plugins.Blockquote: Status: Supported (Stable). A plugin that puts a button on the toolbar that allows users to select text for a semantic 'blockquote' tag-wrap action. It toggles on and off during state changes to determine if the current section is contained within a blockquote. dojox.editor.plugins.PasteFromWord: Status: Beta (unsupported). A plugin that puts a button that opens a dialog to paste in content from Word and similar programs like wordpad. It will then filter out extraneous and bad html from the content before injecting it into the RTE. Experimental as the filter list may not be complete yet. Feedback is welcome and appreciated. Filters will be updated based on it. dojox.editor.plugins.InsertAnchor: Status: Stable (supported). A plugin that allows anchor points to be inserted into the document being edited. The anchors are styled in the doc to make them easily visible/editable in the document. 
dojox.editor.plugins.TextColor: Status: Experimental (unsupported). A plugin that makes use of the dojox.widget.ColorPicker widget in lieu of the dijit.ColorPalette. dojox.editor.plugins.NormalizeStyle: Status: Experimental (unsupported). A plugin that tries to normalize the output from the editor as either CSS styled or semantic (<b>, <i>, etc) style. dojox.editor.plugins.StatusBar: Status: Experimental (unsupported). A plugin that adds a status bar and an optional resize handle to the footer of the editor. dojox.editor.plugins.LocalImage Status: Beta A plugin that adds local image upload and edit capability to the editor. dojox.editor.plugins.AutoUrlLink Status: Experimental (Unsupported) A plugin that adds auto url link creation capability as a headless plugin to the editor dojox.editor.plugins.ResizeColumnPlugin Status: Experimental (Unsupported) A plugin that adds column resize to the editor table plugins. dojox.editor.plugins.AutoSave Status: Experimental (Unsupported) A plugin that provides 'auto-save' capablity, eg, post back to some url at an interval. dojox.editor.plugins.SpellCheck Status: Experimental (Unsupported) A plugin that provides server-side spell-check support. dojox.editor.plugins.SafePaste Status: Beta (Supported) A plugin that provides a safer paste function to the editor. It strips out script tags, tries to fix up odd input from Word, Wordpad, etc. Very similar to PasteFromWord except that it takes complete control of paste in dijit.Editor instead of being an alternate paste icon. ------------------------------------------------------------------------------- Dependencies: dijit dojox.form dojox.html.format dojox.widget.ColorPicker dojox.layout.ResizeHandle ------------------------------------------------------------------------------- Documentation The plugins directory contains extensions which work with dijit.Editor. 
See also: http://dojotoolkit.org/reference-guide/dojox/editor/plugins/TablePlugins.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/PrettyPrint.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/PageBreak.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/ShowBlockNodes.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/Preview.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/Save.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/ToolbarLineBreak.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/InsertEntity.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/NormalizeIndentOutdent.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/Breadcrumb.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/FindReplace.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/CollapsibleToolbar.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/Blockquote.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/PasteFromWord.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/InsertAnchor.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/TextColor.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/NormalizeStyle.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/StatusBar.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/LocalImage.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/AutoUrlLink.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/ResizeTableColumn.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/AutoSave.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/SpellCheck.html http://dojotoolkit.org/reference-guide/dojox/editor/plugins/SafePaste.html .html ------------------------------------------------------------------------------- Plugin Installation instructions Get dojo 
and dijit from svn. Include the Editor and plugins in your page: dojo.require("dijit.Editor"); For the TablePlugins: dojo.require("dojox.editor.plugins.TablePlugins"); and CSS: <link href="[path]dojox/editor/plugins/resources/editorPlugins.css" type="text/css" rel="stylesheet" /> For the UploadImage plugin: dojo.require("dojox.editor.plugins.UploadImage"); and CSS: <link href="[path]dojox/editor/plugins/resources/editorPlugins.css" type="text/css" rel="stylesheet" /> <link href="[path]dojox/form/resources/FileInput.css" type="text/css" rel="stylesheet" /> For the PrettyPrint plugin: dojo.require("dojox.editor.plugins.PrettyPrint"); and CSS: No CSS required. For the PageBreak plugin: dojo.require("dojox.editor.plugins.PageBreak"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/PageBreak.css" type="text/css" rel="stylesheet" /> For the ShowBlockNodes plugin: dojo.require("dojox.editor.plugins.ShowBlockNodes"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/ShowBlockNodes.css" type="text/css" rel="stylesheet" /> For the Preview plugin: dojo.require("dojox.editor.plugins.Preview"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/Preview.css" type="text/css" rel="stylesheet" /> For the Save plugin: dojo.require("dojox.editor.plugins.Save"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/Save.css" type="text/css" rel="stylesheet" /> For the ToolbarLineBreak plugin: dojo.require("dojox.editor.plugins.ToolbarLineBreak"); and CSS: No CSS required. For the InsertEntity plugin: dojo.require("dojox.editor.plugins.InsertEntity"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/InsertEntity.css" type="text/css" rel="stylesheet" /> For the NormalizeIndentOutdent plugin: dojo.require("dojox.editor.plugins.NormalizeIndentOutdent"); and CSS: No CSS required. 
For the Breadcrumb plugin: dojo.require("dojox.editor.plugins.Breadcrumb"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/Breadcrumb.css" type="text/css" rel="stylesheet" /> For the FindReplace plugin: dojo.require("dojox.editor.plugins.FindReplace"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/FindReplace.css" type="text/css" rel="stylesheet" /> For the CollapsibleToolbar plugin: dojo.require("dojox.editor.plugins.CollapsibleToolbar"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/CollapsibleToolbar.css" type="text/css" rel="stylesheet" /> For the Blockquote plugin: dojo.require("dojox.editor.plugins.Blockquote"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/Blockquote.css" type="text/css" rel="stylesheet" /> For the PasteFromWord plugin: dojo.require("dojox.editor.plugins.PasteFromWord"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/PasteFromWord.css" type="text/css" rel="stylesheet" /> For the InsertAnchor plugin: dojo.require("dojox.editor.plugins.InsertAnchor"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/InsertAnchor.css" type="text/css" rel="stylesheet" /> For the TextColor plugin: dojo.require("dojox.editor.plugins.TextColor"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/TextColor.css" type="text/css" rel="stylesheet" /> For the NormalizeStyle plugin: dojo.require("dojox.editor.plugins.NormalizeStyle"); and CSS: No CSS required. For the StatusBar plugin: dojo.require("dojox.editor.plugins.StatusBar"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/StatusBar.css" type="text/css" rel="stylesheet" /> For the LocalImage plugin: dojo.require("dojox.editor.plugins.LocalImage"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/LocalImage.css" type="text/css" rel="stylesheet" /> For the AutoUrlLink plugin: dojo.require("dojox.editor.plugins.AutoUrlLink"); and CSS: No CSS required. 
For the ResizeTableColumn plugin: dojo.require("dojox.editor.plugins.ResizeTableColumn"); and CSS: No CSS required in addition to the table plugins css. For the AutoSave plugin: dojo.require("dojox.editor.plugins.AutoSave"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/AutoSave.css" type="text/css" rel="stylesheet" /> For the SpellCheck plugin: dojo.require("dojox.editor.plugins.SpellCheck"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/SpellCheck.css" type="text/css" rel="stylesheet" /> For the SafePaste plugin: dojo.require("dojox.editor.plugins.SafePaste"); and CSS: <link href="[path]dojox/editor/plugins/resources/css/SafePaste.css" type="text/css" rel="stylesheet" /> See tests for examples: dojox/editor/tests/editorTablePlugs.html dojox/editor/tests/editorUploadPlug.html dojox/editor/tests/editorPrettyPrint.html dojox/editor/tests/editorPageBreak.html dojox/editor/tests/editorShowBlockNodes.html dojox/editor/tests/editorPreview.html dojox/editor/tests/editorSave.html dojox/editor/tests/editorToolbarLineBreak.html dojox/editor/tests/editorInsertEntity.html dojox/editor/tests/editorNormalizeIndentOutdent.html dojox/editor/tests/editorBreadcrumb.html dojox/editor/tests/editorFindReplace.html dojox/editor/tests/editorCollapsibleToolbar.html dojox/editor/tests/editorBlockquote.html dojox/editor/tests/editorPasteFromWord.html dojox/editor/tests/editorInsertAnchor.html dojox/editor/tests/editorTextColor.html dojox/editor/tests/editorNormalizeStyle.html dojox/editor/tests/editorStatusBar.html dojox/editor/tests/editorLocalImage.html dojox/editor/tests/editorAutoUrlLink.html dojox/editor/tests/editorResizeTableColumn.html dojox/editor/tests/editorAutoSave.html dojox/editor/tests/editorSpellCheck.html dojox/editor/tests/editorSafePaste.html dojox/editor/tests/testPluginsAll.html
PypiClean
/Dulcinea-0.11.tar.gz/Dulcinea-0.11/lib/user.py
# NOTE(review): this module is Python 2 code -- it uses the removed `sha`
# module, `raise X, '...'` syntax, `__nonzero__`, `itervalues` and `has_key`,
# none of which exist in Python 3.
from dulcinea.base import DulcineaPersistent
from dulcinea.sort import lexical_sort
from dulcinea.spec import add_getters_and_setters, sequence, string
from dulcinea.spec import spec, require, mapping, init, either
from durus.persistent import Persistent
from durus.persistent_dict import PersistentDict
from durus.persistent_set import PersistentSet
from quixote.util import randbytes
import re, sha, binascii


def hash_password(password):
    """Apply a one way hash function to a password and return the result."""
    # SHA-1 hex digest of the raw password; no salt is applied, so equal
    # passwords always produce equal hashes.
    return sha.new(password).hexdigest()


class Permissions (PersistentDict):
    # Maps a permission name to the persistent set of "granters" that granted
    # it.  A granter is either a Persistent object or the literal True.
    data_is = {string:sequence(either(Persistent, True), PersistentSet)}

    def grant(self, permission, granter):
        # Record that `granter` grants `permission`, creating the granter
        # set on first use.
        require(permission, string)
        require(granter, either(Persistent, True))
        if permission not in self:
            self[permission] = PersistentSet([granter])
        else:
            self[permission].add(granter)

    def ungrant(self, permission, granter):
        # Remove `granter` from `permission`.  When the last granter is
        # removed the permission entry itself is deleted.  A no-op when the
        # grant does not exist.
        require(permission, string)
        require(granter, either(Persistent, True))
        if self.is_granted(permission, granter):
            self.data[permission].remove(granter)
            if len(self.data[permission]) == 0:
                del self.data[permission]

    def is_granted(self, permission, granter):
        # True when `granter` has granted `permission`.
        return granter in self.get(permission, [])


class DulcineaUser(DulcineaPersistent):
    """
    a registered user.
    """
    # Documentation for the globally meaningful permission names used with
    # Permissions / is_granted().
    global_permissions = {
        "act-as": "Allow to act as another user.",
        "create-users": "Allow the creation of other users.",
        "manage-permissions": "Allow changing of permissions.",
        "staff": ("Is a member of the staff, with all of the privileges and "
                  "responsibilities thereunto appertaining."),
        "system": "Allow to do things normally done by the software system.",
    }
    # Legal characters for a user id (see set_id()).
    user_id_re = re.compile('^[-A-Za-z0-9_@.]*$')

    id_is = spec(
        string,
        "unique identifier for this user")
    password_hash_is = spec(
        (string, None),
        "the hashed version of the user's password, created using "
        "the hash_password function")
    email_is = (string, None)
    permissions_is = Permissions

    def __init__(self, user_id=None):
        init(self, permissions=Permissions())
        if user_id is not None:
            self.set_id(user_id)

    def __str__(self):
        return self.id or "*no id*"

    # NOTE(review): the `def format_realname` further down shadows this
    # class-level alias, so only `format` actually remains bound to __str__.
    format = format_realname = __str__ # subclasses should override

    def get_key(self):
        """ used for forming component representing this user in URLs """
        return self.get_id()

    def set_id(self, user_id):
        # The id may be assigned exactly once and must match user_id_re.
        require(user_id, string)
        assert self.id is None, "'id' may only be set once"
        if not self.user_id_re.match(user_id):
            raise ValueError(
                'Invalid user ID %r: can only contain '
                'letters, numbers, and "-_@."' % user_id)
        self.id = user_id

    def set_password(self, new_password, check=True):
        """Set the user's password to 'new_password'."""
        # check_new_password() returns "" for acceptable passwords; any other
        # string is treated as a rejection.
        if check and self.check_new_password(new_password) != "":
            raise ValueError, 'invalid password'
        self.password_hash = hash_password(new_password)

    def valid_password(self, password):
        """Return true if the provided password is correct."""
        return self.password_hash == hash_password(password)

    def generate_password (self, length=6):
        """Set the password to a random value and return the new password."""
        # Base64-encode random bytes and truncate, giving `length` characters
        # drawn from the base64 alphabet.
        password = binascii.b2a_base64(binascii.unhexlify(randbytes(length)))
        password = password[:length]
        self.set_password(password)
        return password

    def check_new_password(self, new_password):
        """(string) -> string
        Check if a new password is valid.

        Returns the empty string if the password is okay
        otherwise returns a string that describes what is wrong
        with the entered password.
        """
        # Base class accepts everything; subclasses can add policy.
        return ""

    def format_realname(self):
        return ''

    def is_null(self):
        # The "null user" is the one whose id is the empty string (see
        # DulcineaUserDatabase.get_null_user()).
        return self.id == ''

    def is_disabled(self):
        # A user with no password hash cannot log in.
        return self.password_hash is None

    def __nonzero__(self):
        # Python 2 truth value: the null user is falsy.
        return not self.is_null()

    def is_system(self):
        return self.id == 'SYSTEM'

    def is_admin(self):
        return self.is_granted('staff')

    def is_granted(self, permission, granter=True):
        # With the default granter=True this asks for a global grant.
        return self.get_permissions().is_granted(permission, granter)

    def can_manage_permissions(self):
        return self.is_granted('manage-permissions')

add_getters_and_setters(DulcineaUser)


class DulcineaUserDatabase(DulcineaPersistent):
    """
    Class to hold all users in the system.  User IDs are always looked up
    in the user database, so you will generally not be able to use a user
    until it has been added to the user database.
    """
    users_is = spec(
        mapping({string:DulcineaUser}, PersistentDict),
        "all known users")
    motd_is = spec(
        string,
        "message-of-the-day")
    # Factory used by get_null_user(); subclasses may substitute their own
    # user class.
    user_class = DulcineaUser

    def __init__(self):
        self.users = PersistentDict()
        self.motd = ''

    def get_all_users(self):
        return self.users.values()

    def __iter__(self):
        return self.users.itervalues()

    def get_users(self, sort=0):
        # All "ordinary" users: excludes the null user, the SYSTEM user and
        # disabled users.  Pass sort=1 to get them lexically sorted.
        users = [user for user in self.users.itervalues()
                 if not (user.is_null() or user.is_system() or
                         user.is_disabled())]
        if sort:
            users = lexical_sort(users)
        return users

    def get_disabled_users(self, sort=0):
        # Disabled users, still excluding the null and SYSTEM users.
        users = [user for user in self.users.itervalues()
                 if user.is_disabled() and not (user.is_null() or
                                                user.is_system())]
        if sort:
            users = lexical_sort(users)
        return users

    def get_matching_user(self, identifier):
        """(identifier : string) -> DulcineaUser | None

        Return a user with matching id or email address, or None if no
        such user is found.
        """
        # Exact id match wins; otherwise fall back to a case-insensitive
        # email comparison when the identifier looks like an address.
        user = self.get_user(identifier)
        if user:
            return user
        elif '@' in identifier:
            identifier = identifier.lower()
            for user in self.users.itervalues():
                if (user.get_email() or '').lower() == identifier:
                    return user
        return None

    def get_user(self, user_id):
        """Return the User object with id 'user_id', or None if no such user.
        """
        return self.users.get(user_id)

    def __getitem__(self, user_id):
        return self.users[user_id]

    def add_user(self, user):
        """Add User object 'user' to the user database.
        """
        assert not self.users.has_key(user.get_id())
        self.users[user.id] = user

    def get_admin(self):
        """Subclasses should override to return a PermissionManager
        """
        return None

    def get_null_user(self):
        # Lazily create and register the user with the empty id.
        user = self.users.get('')
        if user is None:
            user = self.user_class(user_id='')
            self.add_user(user)
        return user

    def get_motd(self):
        return self.motd

    def set_motd(self, motd):
        # Normalize None (and other falsy values) to the empty string.
        self.motd = motd or ''

    def gen_users_granted(self, permission, granter=True):
        # Generator over users holding `permission` from `granter`.
        for user in self:
            if user.is_granted(permission, granter):
                yield user
PypiClean
/FlexGet-3.9.6-py3-none-any.whl/flexget/components/sites/sites/filelist.py
import datetime
import re

from loguru import logger
from sqlalchemy import Column, DateTime, Unicode

from flexget import db_schema, plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.manager import Session
from flexget.utils.database import json_synonym
from flexget.utils.requests import RequestException, TimedLimiter
from flexget.utils.requests import Session as RequestSession
from flexget.utils.soup import get_soup
from flexget.utils.tools import parse_filesize

logger = logger.bind(name='filelist')

Base = db_schema.versioned_base('filelist', 0)

# Shared HTTP session, rate-limited to one request per 2 seconds against
# filelist.ro.
requests = RequestSession()
requests.add_domain_limiter(TimedLimiter('filelist.ro', '2 seconds'))

BASE_URL = 'https://filelist.ro/'

# Site category names mapped to the numeric ids used by browse.php's
# `cat` parameter.  (NB: 'blueray' spelling is part of the config API.)
CATEGORIES = {
    'all': 0,
    'anime': 24,
    'audio': 11,
    'cartoons': 15,
    'docs': 16,
    'games console': 10,
    'games pc': 9,
    'linux': 17,
    'misc': 18,
    'mobile': 22,
    'movies 3d': 25,
    'movies 4k': 6,
    'movies 4k blueray': 26,
    'movies bluray': 20,
    'movies dvd': 2,
    'movies dvd-ro': 3,
    'movies hd': 4,
    'movies hd-ro': 19,
    'movies sd': 1,
    'series 4k': 27,
    'series hd': 21,
    'series sd': 23,
    'software': 8,
    'sport': 13,
    'tv': 14,
    'videoclip': 12,
    'xxx': 7,
}

# Sort orders and search scopes understood by browse.php.
SORTING = {'hybrid': 0, 'relevance': 1, 'date': 2, 'size': 3, 'snatches': 4, 'peers': 5}

SEARCH_IN = {'both': 0, 'title': 1, 'description': 2}


class FileListCookie(Base):
    """ORM row caching one user's login cookie and its expiry time."""

    __tablename__ = 'filelist_cookie'

    username = Column(Unicode, primary_key=True)
    _cookie = Column('cookie', Unicode)
    # Transparent JSON (de)serialization of the raw `cookie` column.
    cookie = json_synonym('_cookie')
    expires = Column(DateTime)


class SearchFileList:
    """
    FileList.ro search plugin.
    """

    schema = {
        'type': 'object',
        'deprecated': 'plugin filelist is deprecated, please consider using plugin filelist_api',
        'properties': {
            'username': {'type': 'string'},
            'password': {'type': 'string'},
            'passkey': {'type': 'string'},
            'category': {'type': 'string', 'enum': list(CATEGORIES.keys()), 'default': 'all'},
            'order_by': {'type': 'string', 'enum': list(SORTING.keys()), 'default': 'hybrid'},
            'order_ascending': {'type': 'boolean', 'default': False},
            'search_in': {'type': 'string', 'enum': list(SEARCH_IN.keys()), 'default': 'both'},
            'include_dead': {'type': 'boolean', 'default': False},
        },
        'required': ['username', 'password', 'passkey'],
        'additionalProperties': False,
    }

    # Set after a failed cookie round-trip so a second redirect to
    # login.php raises instead of retrying forever.
    errors = False

    def get(self, url, params, username, password, force=False):
        """
        Wrapper to allow refreshing the cookie if it is invalid for some reason

        :param str url:
        :param list params:
        :param str username:
        :param str password:
        :param bool force: flag used to refresh the cookie forcefully ie. forgo DB lookup

        :return:
        """
        cookies = self.get_login_cookie(username, password, force=force)

        response = requests.get(url, params=params, cookies=cookies)

        # Being redirected to login.php means the cached cookie is stale.
        if 'login.php' in response.url:
            if self.errors:
                raise plugin.PluginError(
                    'FileList.ro login cookie is invalid. Login page received?'
                )
            self.errors = True
            # try again
            response = self.get(url, params, username, password, force=True)
        else:
            self.errors = False

        return response

    def get_login_cookie(self, username, password, force=False):
        """
        Retrieves login cookie

        :param str username:
        :param str password:
        :param bool force: if True, then retrieve a fresh cookie instead of looking in the DB

        :return:
        """
        # Prefer a cached, unexpired cookie from the DB unless forced.
        if not force:
            with Session() as session:
                saved_cookie = (
                    session.query(FileListCookie)
                    .filter(FileListCookie.username == username.lower())
                    .first()
                )
                if (
                    saved_cookie
                    and saved_cookie.expires
                    and saved_cookie.expires >= datetime.datetime.now()
                ):
                    logger.debug('Found valid login cookie')
                    return saved_cookie.cookie

        url = BASE_URL + 'takelogin.php'
        try:
            # get validator token
            response = requests.get(BASE_URL + 'login.php')
            soup = get_soup(response.content)
            login_validator = soup.find("input", {"name": "validator"})

            if not login_validator:
                raise plugin.PluginError('FileList.ro could not get login validator')
            logger.debug('Login Validator: {}'.format(login_validator.get('value')))
            logger.debug('Attempting to retrieve FileList.ro cookie')
            response = requests.post(
                url,
                data={
                    'username': username,
                    'password': password,
                    'validator': login_validator.get('value'),
                    'login': 'Log in',
                    'unlock': '1',
                },
                timeout=30,
            )
        except RequestException as e:
            raise plugin.PluginError('FileList.ro login failed: %s' % e)

        # A successful login lands on my.php; anything else means bad creds.
        if 'https://filelist.ro/my.php' != response.url:
            raise plugin.PluginError(
                'FileList.ro login failed: Your username or password was incorrect.'
            )

        # Persist the fresh cookie (expiry taken from the 'pass' cookie).
        with Session() as session:
            expires = None
            for c in requests.cookies:
                if c.name == 'pass':
                    expires = c.expires
            if expires:
                expires = datetime.datetime.fromtimestamp(expires)
            logger.debug('Saving or updating FileList.ro cookie in db')
            cookie = FileListCookie(
                username=username.lower(), cookie=dict(requests.cookies), expires=expires
            )
            session.merge(cookie)
            return cookie.cookie

    @plugin.internet(logger)
    def search(self, task, entry, config):
        """
        Search for entries on FileList.ro
        """
        entries = []

        params = {
            'cat': CATEGORIES[config['category']],
            'incldead': int(config['include_dead']),
            'order_by': SORTING[config['order_by']],
            'searchin': SEARCH_IN[config['search_in']],
            'asc': int(config['order_ascending']),
        }

        for search_string in entry.get('search_strings', [entry['title']]):
            params['search'] = search_string
            logger.debug('Using search params: {}', params)
            try:
                page = self.get(
                    BASE_URL + 'browse.php', params, config['username'], config['password']
                )
                logger.debug('requesting: {}', page.url)
            except RequestException as e:
                logger.error('FileList.ro request failed: {}', e)
                continue

            soup = get_soup(page.content)
            # Each result row is a div.torrentrow containing a fixed sequence
            # of div.torrenttable cells, addressed by index below.
            for result in soup.findAll('div', attrs={'class': 'torrentrow'}):
                e = Entry()

                torrent_info = result.findAll('div', attrs={'class': 'torrenttable'})

                # genres
                genres = torrent_info[1].find('font')
                if genres:
                    # Strip the surrounding brackets and split "a|b|c".
                    genres = genres.text.lstrip('[').rstrip(']').replace(' ', '')
                    genres = genres.split('|')

                # Freeleech/internal flags are signalled by icon alt-texts.
                tags = torrent_info[1].findAll('img')
                freeleech = False
                internal = False
                for tag in tags:
                    if tag.get('alt', '').lower() == 'freeleech':
                        freeleech = True
                    if tag.get('alt', '').lower() == 'internal':
                        internal = True

                title = torrent_info[1].find('a').get('title')
                # this is a dirty fix to get the full title since their developer is a moron
                if re.match(r"\<img src=\'.*\'\>", title):
                    title = torrent_info[1].find('b').text

                # if the title is shortened, then do a request to get the full one :(
                if title.endswith('...'):
                    url = BASE_URL + torrent_info[1].find('a')['href']
                    try:
                        request = self.get(url, {}, config['username'], config['password'])
                    except RequestException as e:
                        logger.error('FileList.ro request failed: {}', e)
                        continue
                    title_soup = get_soup(request.content)
                    title = title_soup.find('div', attrs={'class': 'cblock-header'}).text

                e['title'] = title
                e['url'] = (
                    BASE_URL
                    + torrent_info[3].find('a')['href']
                    + '&passkey='
                    + config['passkey']
                )
                e['content_size'] = parse_filesize(torrent_info[6].find('font').text)

                # Snatch count is rendered like "1,234 times"; strip the noise.
                e['torrent_snatches'] = int(
                    torrent_info[7]
                    .find('font')
                    .text.replace(' ', '')
                    .replace('times', '')
                    .replace(',', '')
                )

                e['torrent_seeds'] = int(torrent_info[8].find('span').text)
                e['torrent_leeches'] = int(torrent_info[9].find('span').text)
                e['torrent_internal'] = internal
                e['torrent_freeleech'] = freeleech
                if genres:
                    e['torrent_genres'] = genres

                entries.append(e)

        return entries


@event('plugin.register')
def register_plugin():
    plugin.register(SearchFileList, 'filelist', interfaces=['search'], api_ver=2)
PypiClean
/MXFusion-0.3.1.tar.gz/MXFusion-0.3.1/mxfusion/components/distributions/gp/kernels/rbf.py
from .stationary import StationaryKernel


class RBF(StationaryKernel):
    """
    The Radial Basis Function (RBF) kernel, also known as the
    squared-exponential, exponentiated quadratic or Gaussian kernel:

    .. math::
       k(r^2) = \\sigma^2 \\exp \\bigg(- \\frac{1}{2} r^2 \\bigg)

    :param input_dim: the number of dimensions of the kernel. (The total
        number of active dimensions)
    :type input_dim: int
    :param ARD: a binary switch for Automatic Relevance Determination (ARD).
        If true, the squared distance is divided by a lengthscale for
        individual dimensions.
    :type ARD: boolean
    :param variance: the initial value for the variance parameter (scalar),
        which scales the whole covariance matrix.
    :type variance: float or MXNet NDArray
    :param lengthscale: the initial value for the lengthscale parameter.
    :type lengthscale: float or MXNet NDArray
    :param name: the name of the kernel. The name is used to access kernel
        parameters.
    :type name: str
    :param active_dims: The dimensions of the inputs that are taken for the
        covariance matrix computation. (default: None, taking all the
        dimensions).
    :type active_dims: [int] or None
    :param dtype: the data type for float point numbers.
    :type dtype: numpy.float32 or numpy.float64
    :param ctx: the mxnet context (default: None/current context).
    :type ctx: None or mxnet.cpu or mxnet.gpu
    """
    # The covariance computation supports broadcasting over batch dimensions.
    broadcastable = True

    def __init__(self, input_dim, ARD=False, variance=1., lengthscale=1.,
                 name='rbf', active_dims=None, dtype=None, ctx=None):
        # All configuration is handled by the stationary-kernel base class;
        # RBF itself adds no extra parameters.
        super(RBF, self).__init__(
            input_dim=input_dim, ARD=ARD, variance=variance,
            lengthscale=lengthscale, name=name, active_dims=active_dims,
            dtype=dtype, ctx=ctx)

    def _compute_K(self, F, X, lengthscale, variance, X2=None):
        """
        The internal interface for the actual covariance matrix computation.

        :param F: MXNet computation type <mx.sym, mx.nd>.
        :param X: the first set of inputs to the kernel.
        :type X: MXNet NDArray or MXNet Symbol
        :param X2: (optional) the second set of arguments to the kernel. If
            X2 is None, this computes a square covariance matrix of X. In
            other words, X2 is internally treated as X.
        :type X2: MXNet NDArray or MXNet Symbol
        :param variance: the variance parameter (scalar), which scales the
            whole covariance matrix.
        :type variance: MXNet NDArray or MXNet Symbol
        :param lengthscale: the lengthscale parameter.
        :type lengthscale: MXNet NDArray or MXNet Symbol
        :return: The covariance matrix.
        :rtype: MXNet NDArray or MXNet Symbol
        """
        # Lengthscale-scaled squared distances r^2, computed by the base class.
        scaled_sq_dist = self._compute_R2(F, X, lengthscale, variance, X2=X2)
        # sigma^2 * exp(-r^2 / 2); variance gains a trailing axis so it
        # broadcasts across the covariance matrix.
        amplitude = F.expand_dims(variance, axis=-1)
        return F.exp(scaled_sq_dist / -2) * amplitude
PypiClean
/GxSphinx-1.0.0.tar.gz/GxSphinx-1.0.0/sphinx/builders/latex/constants.py
from typing import Any, Dict

# Preamble font setup used when building with pdflatex.  The \substitutefont
# branches only take effect when the LGR (Greek) or X2/T2A (Cyrillic) font
# encodings were declared earlier in the preamble.
PDFLATEX_DEFAULT_FONTPKG = r'''
\usepackage{times}
\expandafter\ifx\csname T@LGR\endcsname\relax
\else
% LGR was declared as font encoding
\substitutefont{LGR}{\rmdefault}{cmr}
\substitutefont{LGR}{\sfdefault}{cmss}
\substitutefont{LGR}{\ttdefault}{cmtt}
\fi
\expandafter\ifx\csname T@X2\endcsname\relax
\expandafter\ifx\csname T@T2A\endcsname\relax
\else
% T2A was declared as font encoding
\substitutefont{T2A}{\rmdefault}{cmr}
\substitutefont{T2A}{\sfdefault}{cmss}
\substitutefont{T2A}{\ttdefault}{cmtt}
\fi
\else
% X2 was declared as font encoding
\substitutefont{X2}{\rmdefault}{cmr}
\substitutefont{X2}{\sfdefault}{cmss}
\substitutefont{X2}{\ttdefault}{cmtt}
\fi
'''

# fontspec-based font setup (GNU FreeFont family) for the xelatex engine.
XELATEX_DEFAULT_FONTPKG = r'''
\setmainfont{FreeSerif}[
Extension = .otf,
UprightFont = *,
ItalicFont = *Italic,
BoldFont = *Bold,
BoldItalicFont = *BoldItalic
]
\setsansfont{FreeSans}[
Extension = .otf,
UprightFont = *,
ItalicFont = *Oblique,
BoldFont = *Bold,
BoldItalicFont = *BoldOblique,
]
\setmonofont{FreeMono}[
Extension = .otf,
UprightFont = *,
ItalicFont = *Oblique,
BoldFont = *Bold,
BoldItalicFont = *BoldOblique,
]
'''

# xelatex font setup for Greek documents: the default fonts plus dedicated
# \greekfont* families.
XELATEX_GREEK_DEFAULT_FONTPKG = (XELATEX_DEFAULT_FONTPKG +
                                 '\n\\newfontfamily\\greekfont{FreeSerif}' +
                                 '\n\\newfontfamily\\greekfontsf{FreeSans}' +
                                 '\n\\newfontfamily\\greekfonttt{FreeMono}')

# lualatex reuses the xelatex (fontspec-based) font setup unchanged.
LUALATEX_DEFAULT_FONTPKG = XELATEX_DEFAULT_FONTPKG

# Baseline values for the LaTeX template variables; engine- and
# language-specific deviations live in ADDITIONAL_SETTINGS below.
DEFAULT_SETTINGS = {
    'latex_engine': 'pdflatex',
    'papersize': '',
    'pointsize': '',
    'pxunit': '.75bp',
    'classoptions': '',
    'extraclassoptions': '',
    'maxlistdepth': '',
    'sphinxpkgoptions': '',
    'sphinxsetup': '',
    'fvset': '\\fvset{fontsize=\\small}',
    'passoptionstopackages': '',
    'geometry': '\\usepackage{geometry}',
    'inputenc': '',
    'utf8extra': '',
    'cmappkg': '\\usepackage{cmap}',
    'fontenc': '\\usepackage[T1]{fontenc}',
    'amsmath': '\\usepackage{amsmath,amssymb,amstext}',
    'multilingual': '',
    'babel': '\\usepackage{babel}',
    'polyglossia': '',
    'fontpkg': PDFLATEX_DEFAULT_FONTPKG,
    'substitutefont': '',
    'textcyrillic': '',
    'textgreek': '\\usepackage{textalpha}',
    'fncychap': '\\usepackage[Bjarne]{fncychap}',
    'hyperref': ('% Include hyperref last.\n'
                 '\\usepackage{hyperref}\n'
                 '% Fix anchor placement for figures with captions.\n'
                 '\\usepackage{hypcap}% it must be loaded after hyperref.\n'
                 '% Set up styles of URL: it should be placed after hyperref.\n'
                 '\\urlstyle{same}'),
    'contentsname': '',
    'extrapackages': '',
    'preamble': '',
    'title': '',
    'release': '',
    'author': '',
    'releasename': '',
    'makeindex': '\\makeindex',
    'shorthandoff': '',
    'maketitle': '\\sphinxmaketitle',
    'tableofcontents': '\\sphinxtableofcontents',
    'atendofbody': '',
    'printindex': '\\printindex',
    'transition': '\n\n\\bigskip\\hrule\\bigskip\n\n',
    'figure_align': 'htbp',
    'tocdepth': '',
    'secnumdepth': '',
}  # type: Dict[str, Any]

# Values that differ from DEFAULT_SETTINGS, keyed by engine name or by an
# (engine, language-code) tuple for language-specific overrides.
ADDITIONAL_SETTINGS = {
    'pdflatex': {
        'inputenc': '\\usepackage[utf8]{inputenc}',
        # Declare replacements for Unicode characters that pdflatex's utf8
        # input encoding does not handle out of the box (nbsp, box-drawing).
        'utf8extra': ('\\ifdefined\\DeclareUnicodeCharacter\n'
                      '% support both utf8 and utf8x syntaxes\n'
                      ' \\ifdefined\\DeclareUnicodeCharacterAsOptional\n'
                      ' \\def\\sphinxDUC#1{\\DeclareUnicodeCharacter{"#1}}\n'
                      ' \\else\n'
                      ' \\let\\sphinxDUC\\DeclareUnicodeCharacter\n'
                      ' \\fi\n'
                      ' \\sphinxDUC{00A0}{\\nobreakspace}\n'
                      ' \\sphinxDUC{2500}{\\sphinxunichar{2500}}\n'
                      ' \\sphinxDUC{2502}{\\sphinxunichar{2502}}\n'
                      ' \\sphinxDUC{2514}{\\sphinxunichar{2514}}\n'
                      ' \\sphinxDUC{251C}{\\sphinxunichar{251C}}\n'
                      ' \\sphinxDUC{2572}{\\textbackslash}\n'
                      '\\fi'),
    },
    'xelatex': {
        'latex_engine': 'xelatex',
        'polyglossia': '\\usepackage{polyglossia}',
        'babel': '',
        'fontenc': ('\\usepackage{fontspec}\n'
                    '\\defaultfontfeatures[\\rmfamily,\\sffamily,\\ttfamily]{}'),
        'fontpkg': XELATEX_DEFAULT_FONTPKG,
        'textgreek': '',
        # Make U+00A0 active and expand it to a non-breaking space.
        'utf8extra': ('\\catcode`^^^^00a0\\active\\protected\\def^^^^00a0'
                      '{\\leavevmode\\nobreak\\ }'),
    },
    'lualatex': {
        'latex_engine': 'lualatex',
        'polyglossia': '\\usepackage{polyglossia}',
        'babel': '',
        'fontenc': ('\\usepackage{fontspec}\n'
                    '\\defaultfontfeatures[\\rmfamily,\\sffamily,\\ttfamily]{}'),
        'fontpkg': LUALATEX_DEFAULT_FONTPKG,
        'textgreek': '',
        'utf8extra': ('\\catcode`^^^^00a0\\active\\protected\\def^^^^00a0'
                      '{\\leavevmode\\nobreak\\ }'),
    },
    'platex': {
        'latex_engine': 'platex',
        'babel': '',
        'classoptions': ',dvipdfmx',
        'fontpkg': '\\usepackage{times}',
        'textgreek': '',
        'fncychap': '',
        'geometry': '\\usepackage[dvipdfm]{geometry}',
    },
    'uplatex': {
        'latex_engine': 'uplatex',
        'babel': '',
        'classoptions': ',dvipdfmx',
        'fontpkg': '\\usepackage{times}',
        'textgreek': '',
        'fncychap': '',
        'geometry': '\\usepackage[dvipdfm]{geometry}',
    },

    # special settings for latex_engine + language_code
    ('xelatex', 'fr'): {
        # use babel instead of polyglossia by default
        'polyglossia': '',
        'babel': '\\usepackage{babel}',
    },
    ('xelatex', 'zh'): {
        'polyglossia': '',
        'babel': '\\usepackage{babel}',
        'fontenc': '\\usepackage{xeCJK}',
    },
    ('xelatex', 'el'): {
        'fontpkg': XELATEX_GREEK_DEFAULT_FONTPKG,
    },
}  # type: Dict[Any, Dict[str, Any]]

# Preamble snippet that turns off babel's "=" and '"' shorthands, but only
# when those characters are currently active (i.e. defined as shorthands).
SHORTHANDOFF = r'''
\ifdefined\shorthandoff
\ifnum\catcode`\=\string=\active\shorthandoff{=}\fi
\ifnum\catcode`\"=\active\shorthandoff{"}\fi
\fi
'''
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/seq_editer.py
import os
import shutil
import sys
import csv
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from annogesiclib.seqmodifier import SeqModifier


class SeqEditer(object):
    '''Edit the sequence if it is needed'''

    def _row_to_location(self, row, out_name):
        """Convert one modification-table row into a location dict.

        Expected row layout (tab-separated, from the mod table):
        row[0]=reference id, row[1]=position, row[3]=reference nt,
        row[4]=target nt.  All fields are stripped so the first row of a
        group is stored consistently with later rows (the original code
        only stripped rows after the first one of each group).

        Returns a dict with the reference id, the derived target id
        ("<out_name>_<ref_id>") and a one-element list of modifications.
        """
        ref_id = row[0].strip()
        return {"ref_id": ref_id,
                "target_id": "_".join([out_name, ref_id]),
                "datas": [{"ref_nt": row[3].strip(),
                           "tar_nt": row[4].strip(),
                           "position": row[1].strip()}]}

    def _import_data(self, mod_table_file, out_name):
        """Parse the modification table into a list of location dicts.

        Consecutive rows sharing the same reference id are merged into a
        single entry; lines whose first field starts with "#" are treated
        as comments and skipped.  The file handle is managed with a
        context manager so it is closed even if parsing raises.
        """
        datas = []
        pre_ref_id = None
        with open(mod_table_file) as fh:
            for row in csv.reader(fh, delimiter="\t"):
                if row[0].startswith("#"):
                    continue
                ref_id = row[0].strip()
                if ref_id == pre_ref_id:
                    # Same contiguous group: extend the latest entry.
                    datas[-1]["datas"].append(
                        {"ref_nt": row[3].strip(),
                         "tar_nt": row[4].strip(),
                         "position": row[1].strip()})
                else:
                    datas.append(self._row_to_location(row, out_name))
                    pre_ref_id = ref_id
        return datas

    def modify_seq(self, fasta_folder, mod_table_file, output_folder, out_name):
        """Apply the modifications in mod_table_file to each reference FASTA.

        For every reference id in the table, reads "<ref_id>.fa" from
        fasta_folder (if present), applies insertions ("-" in ref_nt),
        deletions ("-" in tar_nt) or replacements via SeqModifier, and
        writes the edited sequence to output_folder as
        "<out_name>_<ref_id>.fa".
        """
        datas = self._import_data(mod_table_file, out_name)
        for data in datas:
            fasta_name = data["ref_id"] + ".fa"
            if fasta_name not in os.listdir(fasta_folder):
                continue
            # Concatenate all non-header, non-empty lines into one sequence.
            seq = ""
            with open(os.path.join(fasta_folder, fasta_name), "r") as fasta:
                for line in fasta:
                    line = line.strip()
                    if len(line) != 0 and not line.startswith(">"):
                        seq = seq + line
            seq_modifier = SeqModifier(seq)
            for change in data["datas"]:
                position = int(change["position"])
                if change["ref_nt"] == "-":
                    # "-" as reference nt means insertion of tar_nt.
                    seq_modifier.insert(position, change["tar_nt"])
                elif change["tar_nt"] == "-":
                    # "-" as target nt means deletion of len(ref_nt) bases.
                    seq_modifier.remove(position, len(change["ref_nt"]))
                else:
                    seq_modifier.replace(position, change["tar_nt"])
            record = SeqRecord(Seq(seq_modifier.seq()))
            record.id = data["target_id"]
            record.description = ""
            SeqIO.write(record, os.path.join(
                output_folder, record.id + ".fa"), "fasta")

    def modify_header(self, input_file):
        """Normalize FASTA headers of input_file in place.

        Headers of the form "a|b|c|d|e" (five pipe-separated fields) are
        replaced by ">d"; any other header is truncated at the first
        space.  Exits with an error message if the first non-blank-checked
        line is not a header.  Writes to a temporary sibling file and
        moves it over the original on success.

        Fixes vs. the original: an empty first line no longer raises
        IndexError (it is reported as a bad header); the unused "folder"
        computation and the unreachable non-header elif branch were
        removed; the temp path variable is used instead of re-building
        the literal; file handles are closed via context managers.
        """
        tmp_file_path = input_file + "_TMP"
        first = True
        with open(tmp_file_path, "w") as output_fh:
            with open(input_file, "r") as s_h:
                for line in s_h:
                    line = line.strip()
                    if first:
                        first = False
                        if not line.startswith(">"):
                            print("Error: No proper header!!")
                            sys.exit()
                    if line.startswith(">"):
                        mod = line.split("|")
                        if len(mod) == 5:
                            line = ">%s" % (mod[3])
                        else:
                            line = line.split(" ")[0]
                    output_fh.write(line + "\n")
        shutil.move(tmp_file_path, input_file)
PypiClean
/BaculaFS-0.1.7.tar.gz/BaculaFS-0.1.7/baculafs/FileSystem.py
__version__ = '0.1.7' import os import sys import stat import errno import copy import tempfile import shutil import threading import traceback import pexpect import fcntl import time import re import binascii from LogFile import * from Database import * from Catalog import * from SQL import * # pull in some spaghetti to make this stuff work without fuse-py being installed try: import _find_fuse_parts except ImportError: pass import fuse from fuse import Fuse if not hasattr(fuse, '__version__'): raise RuntimeError, \ "your fuse-py doesn't know of fuse.__version__, probably it's too old." fuse.fuse_python_api = (0, 2) fuse.feature_assert('stateful_files', 'has_init') def flag2mode(flags): ''' taken from python-fuse xmp.py example ''' md = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'} m = md[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)] if flags | os.O_APPEND: m = m.replace('w', 'a', 1) return m def makedirs(path): ''' create path like mkdir -p taken from: http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python/600612#600612 ''' try: os.makedirs(path) except OSError, exc: if exc.errno == errno.EEXIST: pass else: raise def touch(fname, times = None): ''' touch file adapted from: http://stackoverflow.com/questions/1158076/implement-touch-using-python/1160227#1160227 ''' fhandle = open(fname, 'a') try: os.utime(fname, times) finally: fhandle.close() class FileSystem(Fuse) : null_stat = fuse.Stat(st_mode=stat.S_IFDIR | 0755, st_ino=0, st_dev=0, st_nlink=2, st_uid=0, st_gid=0, st_size=0, st_atime=0, st_mtime=0, st_ctime=0, st_blksize=0, st_rdev=0) bacula_stat_fields = ['st_dev', 'st_ino', 'st_mode', 'st_nlink', 'st_uid', 'st_gid', 'st_rdev', 'st_size', 'st_blksize', 'st_blocks', 'st_atime', 'st_mtime', 'st_ctime', 'st_linkfi', 'st_flags', 'st_streamid'] fuse_stat_fields = [attr for attr in dir(null_stat) if attr.startswith('st_')] xattr_prefix = 'user.baculafs.' 
xattr_fields = ['FileIndex', 'JobId', 'LStat', 'MD5'] xattr_fields_root = ['client', 'fileset', 'datetime', 'joblist', 'cache_prefix'] xattr_fields_bextract = ['path', 'volume', 'retries', 'state', 'pending', 'failures'] bextract_done = {'path': None, 'volume': None, 'retries': 0, 'state': 'idle'} def __init__(self, *args, **kw): ''' Initialize filesystem ''' self._extract_lock = threading.Lock() self._getattr_lock = threading.Lock() self._bextract_status_lock = threading.Lock() self._bextract_user_intervention_event = threading.Event() self._initialized = False # default option values self.logging = 'info' self.syslog = False self.driver = SQL.SQLITE3 self.database = None self.host = 'localhost' self.port = 0 self.username = 'bacula' self.password = None self.conf = '/etc/bacula/bacula-sd.conf' self.client = '' self.fileset = None self.device = 'FileStorage' self.datetime = None self.recent_job = False self.joblist = None self.cache_prefix = None self.user_cache_path = None self.cleanup = False self.move_root = False self.prefetch_attrs = False self.prefetch_regex = None self.prefetch_symlinks = False self.prefetch_recent = False self.prefetch_diff = None self.prefetch_difflist = None self.prefetch_list = None self.prefetch_everything = False self.batch_mode = False self.batch_list = False self.batch_bsr = False self.batch_extract = False self.use_ino = False self.max_ino = 0 self.dirs = { '/': { '': (FileSystem.null_stat,) } } self._bextract_status = copy.deepcopy(FileSystem.bextract_done) self._bextract_status['pending'] = 0 self._bextract_status['failures'] = 0 class File (FileSystem._File): def __init__(self2, *a, **kw): FileSystem._File.__init__(self2, self, *a, **kw) self.file_class = File Fuse.__init__(self, *args, **kw) def _split(self, path) : ''' os.path.split wrapper ''' head, tail = os.path.split(path) if head and not head.endswith('/') : head += '/' return head, tail def _bacula_stat(self, base64) : ''' Parse base64 encoded lstat info. 
Returns fuse.Stat object with subset of decoded values, and dictionary with full list of decoded values ''' st = fuse.Stat() lst = dict(zip(FileSystem.bacula_stat_fields, map(self.base64.decode, base64.split()))) for k in FileSystem.bacula_stat_fields : if k in FileSystem.fuse_stat_fields : setattr(st, k, lst[k]) return lst, st def _add_parent_dirs(self, path) : ''' add parent directories of path to dirs dictionary ''' head, tail = self._split(path[:-1]) if not head or head == path: return if not head in self.dirs : self.dirs[head] = { tail: (FileSystem.null_stat,) } elif not tail in self.dirs[head] : self.dirs[head][tail] = (FileSystem.null_stat,) self._add_parent_dirs(head) def _update_inodes(self, head) : ''' generate unique st_ino for each missing st_ino ''' for tail in self.dirs[head] : if self.dirs[head][tail][-1].st_ino == 0 : if len(self.dirs[head][tail]) == 1: self.dirs[head][tail] = (copy.deepcopy(FileSystem.null_stat),) self.max_ino += 1 self.dirs[head][tail][-1].st_ino = self.max_ino subdir = '%s%s/' % (head, tail) if subdir in self.dirs : self._update_inodes(subdir) def _extract(self, path_list) : ''' extract path list from storage, returns path list of extracted files ''' nitems = len(path_list) self._bextract_increment_counter('pending', nitems) # serialize extractions self._extract_lock.acquire() items = [] realpath_list = [] hardlink_targets = [] for path in path_list : realpath, symlinkinfo, volumes = self._find_volumes(path) realpath_list.append(realpath) if volumes : items.append((symlinkinfo, path, volumes)) # collect hard link targets hardlink_target = self._hardlink_target(path) if (hardlink_target and hardlink_target not in path_list and hardlink_target not in hardlink_targets) : hardlink_targets.append(hardlink_target) # add hardlink targets to list # bextract will fail to extract the hardlink if its target does not exist for path in hardlink_targets : realpath, symlinkinfo, volumes = self._find_volumes(path) if volumes : 
items.append((symlinkinfo, path, volumes)) if len(items) > 0 : rc, sig = self._bextract(items) # it seems that bextract does not restore mtime for symlinks # so we create a normal file with same mtime as stored symlink if rc == 0 and not self.batch_mode : for item in items : if item[0] : symlinkfile = item[0][0] symlinktime = item[0][1:] makedirs(os.path.dirname(symlinkfile)) touch(symlinkfile, symlinktime) self._extract_lock.release() self._bextract_increment_counter('pending', -nitems) return realpath_list def _hardlink_target(self, path) : ''' return hard link target of path if it is a hard link ''' head, tail = self._split(path) bs = self.dirs[head][tail][-2] jobid = self.dirs[head][tail][1] if bs['st_nlink'] > 1 and bs['st_linkfi'] > 0 : st_linkfi = bs['st_linkfi'] for file in self.catalog.files : if jobid == file[3] and st_linkfi == file[2] : hardlink_target = ('/' if not file[0].startswith('/') else '')+file[0]+file[1] return hardlink_target return None def _find_volumes(self, path) : ''' return list of volumes that contain path to be extracted, if the path has not been extracted yet ''' realpath = os.path.normpath(self.cache_path + path) symlinkpath = os.path.normpath(self.cache_symlinks + path) head, tail = self._split(path) # sanity check: path should not be a directory if tail == '': raise RuntimeError, 'trying to extract a directory %s' % path # check that path exists in catalog if head not in self.dirs or tail not in self.dirs[head] : return None, None, None # sanity check: path entry is incomplete if len(self.dirs[head][tail]) == 1 : raise RuntimeError, 'incomplete entry for path %s' % path # return if file has already been extracted bs = self.getattr(path) is_symlink = stat.S_ISLNK(bs.st_mode) found = False if os.path.exists(realpath) or os.path.lexists(realpath) : # make sure that stat info of realpath matches path s = os.lstat(realpath) conds = [getattr(s, attr) == getattr(bs, attr) for attr in ['st_mode', 'st_uid', 'st_gid', 'st_size', 
'st_mtime']] if is_symlink : conds[-1] = (os.path.exists(symlinkpath) and bs.st_mtime == os.stat(symlinkpath).st_mtime) if all(conds) : return realpath, None, None # generate list of volumes for path fileindex, jobid = self.dirs[head][tail][0:2] jobs = [job for job in self.catalog.jobs if job[0] == jobid] volumes = [[volume[1], # 0-Volume volume[2], # 1-MediaType self.device, # 2-Device jobs[0][0], # 3-JobId jobs[0][1], # 4-VolSessionId jobs[0][2], # 5-VolSessionTime (volume[5] << 32) | volume[7], # 6-VolAddr: StartAddr (volume[6] << 32) | volume[8], # 7-VolAddr: EndAddr fileindex] # 8-FileIndex for volume in self.catalog.volumes if (volume[0] == jobid and volume[3] <= fileindex and fileindex <= volume[4])] return realpath, (symlinkpath, bs.st_atime, bs.st_mtime) if is_symlink else None, volumes def _bextract_set_status(self, status) : ''' thread safe modification of bextract status dict ''' self._bextract_status_lock.acquire() for key in status : self._bextract_status[key] = status[key] self._bextract_status_lock.release() def _bextract_increment_counter(self, counter, n) : ''' thread safe modification of bextract counters ''' self._bextract_status_lock.acquire() self._bextract_status[counter] += n self._bextract_status_lock.release() def _bextract_get_status(self) : ''' thread safe access to bextract status dict ''' self._bextract_status_lock.acquire() status = copy.deepcopy(self._bextract_status) self._bextract_status_lock.release() return status def _bextract_flock(self) : ''' lock the storage daemon configuration file ''' # we allow locking to fail, so as to allow # at least a single instance of baculafs, # even if we can't lock the sd conf file try : f = open(self.conf, 'r') fcntl.flock(f, fcntl.LOCK_EX) return f except : self.logger.warning(traceback.format_exc()) return None def _bextract_funlock(self, f) : ''' unlock the file f ''' if not f : return try : fcntl.flock(f, fcntl.LOCK_UN) f.close() except : self.logger.warning(traceback.format_exc()) def 
_bextract(self, items) : ''' extract list of items from Bacula storage device ''' if self.batch_list : for item in items : print item[1] if (not self.batch_bsr and not self.batch_extract) : return (0, 0) bsrpath = self._write_bsr(items) if self.batch_bsr : bsrfile = open(bsrpath, 'rt') for line in bsrfile : sys.stdout.write(line) sys.stdout.flush() bsrfile.close() if not self.batch_extract : return (0, 0) if self.batch_extract : makedirs(self.fuse_args.mountpoint) cmd = 'bextract -b "%s" -c "%s" "%s" "%s"' % (bsrpath, self.conf, self.device, self.cache_path if not self.batch_extract else self.fuse_args.mountpoint) self.logger.debug(cmd) self._bextract_set_status({'path': items[0][1], 'volume': items[0][-1][0][0], 'retries': 0, 'state': 'run'}) # we serialize calls to bextract across instances of baculafs # by locking the storage daemon configuration file # (note that this may not work over NFS) f = self._bextract_flock() child = pexpect.spawn(cmd) child.logfile = self.logfile #sys.stdout attempt = 0 missing = '' while True : # bextract either finishes or waits for a missing volume i = child.expect([self.fail_pattern, pexpect.EOF], timeout=None, searchwindowsize=200) self.logfile.flush(flush_tail = True) if i == 0 : # count retries if missing == child.match.groups()[0] : attempt += 1 self._bextract_set_status({'retries': attempt, 'state': '*user intervention required*'}) else : attempt = 1 missing = child.match.groups()[0] self._bextract_set_status({'volume': missing, 'retries': attempt, 'state': '*user intervention required*'}) # wait for user if not self._initialized : if self.loglevel != logging.DEBUG : sys.stdout.write('Mount Volume "%s" on device "%s" %s and press return when ready: ' % (missing, self.device, child.match.groups()[1])) sys.stdout.flush() sys.stdin.read(1) else : self.logger.error('Mount volume "%s" on device "%s" %s and run "attr -s baculafs.bextract.state -V run %s" when ready' % (missing, self.device, child.match.groups()[1], 
self.fuse_args.mountpoint)) self._bextract_user_intervention_event.clear() self._bextract_user_intervention_event.wait() self._bextract_user_intervention_event.clear() # retry self._bextract_set_status({'state': 'run'}) child.sendline('') else : child.close() break # unlock the sd configuration file self._bextract_funlock(f) self._bextract_set_status(FileSystem.bextract_done) if child.exitstatus or child.signalstatus : self.logger.error('extraction failed (bsr file: %s)' % bsrpath) self._bextract_increment_counter('failures', 1) return (child.exitstatus, child.signalstatus) def _group_by_volume(self, items) : ''' return items grouped by volume ''' # group volumes volumes = [] for item in items : for v in item[-1] : found = False findex = v[-1] for vindex in xrange(0,len(volumes)) : volume = volumes[vindex] if not any(map(cmp, v[:-1], volume[:-1])) : volume[-1].append(findex) found = True break if not found : volumes.append(v[:-1] + [[v[-1]]]) # compact list of file indices for volume in volumes : volume[-1] = list(set(volume[-1])) volume[-1].sort() l = len(volume[-1]) findex = volume[-1][0] findices = [(findex, findex)] for idx in volume[-1][1:] : next_idx = findices[-1][-1] + 1 if idx == next_idx : findices[-1] = (findices[-1][0], idx) else : findices.append((idx,idx)) volume[-1] = findices volume.append(l) # reorder volumes to ensure correct handling of # files spanning multiple volumes volumes.sort(cmp = lambda a,b : \ (cmp(a[3],b[3]) or cmp(a[8][-1][-1], b[8][0][0]))) return volumes def _write_bsr(self, items) : ''' generate bsr for items to be extracted ''' bsrfd, bsrpath = tempfile.mkstemp(suffix='.bsr', dir=self.cache_bsrpath, text=True) volumes = self._group_by_volume(items) for volume in volumes : os.write(bsrfd, 'Volume="%s"\n' % volume[0]) os.write(bsrfd, 'MediaType="%s"\n' % volume[1]) os.write(bsrfd, 'Device="%s"\n' % volume[2]) os.write(bsrfd, 'VolSessionId=%d\n' % volume[4]) os.write(bsrfd, 'VolSessionTime=%d\n' % volume[5]) if not self.bsr_compat : 
os.write(bsrfd, 'VolAddr=%d-%d\n' % (volume[6],volume[7])) for findex in volume[8] : if findex[0] == findex[1] : os.write(bsrfd, 'FileIndex=%d\n' % findex[0]) else : os.write(bsrfd, 'FileIndex=%d-%d\n' % findex) os.write(bsrfd, 'Count=%d\n' % volume[9]) os.close(bsrfd) return bsrpath def _match_stat(self, path, bs) : ''' determine if stat of path matches bs ''' found = False if os.path.exists(path) or os.path.lexists(path) : s = os.lstat(path) found = all([getattr(s, attr) == getattr(bs, attr) for attr in ['st_mode', 'st_uid', 'st_gid', 'st_size', 'st_mtime']]) return found def _setup_logging(self) : ''' initialize logging facility ''' # log messages are sent to both console and syslog # use -o logging=level to set the log level # use -o syslog to enable logging to syslog self.logger = logging.getLogger('BaculaFS') self.loglevel = LOGGING_LEVELS.get(self.logging, logging.NOTSET) self.logger.setLevel(self.loglevel) h = logging.StreamHandler() h.setLevel(self.loglevel) formatter = logging.Formatter("%(message)s") h.setFormatter(formatter) self.logger.addHandler(h) if self.syslog : try : h = logging.handlers.SysLogHandler('/dev/log') h.setLevel(self.loglevel) formatter = logging.Formatter("%(name)s: %(levelname)-8s - %(message)s") h.setFormatter(formatter) self.logger.addHandler(h) except : self.logger.warning(traceback.format_exc()) self.logfile = LogFile(self.logger, logging.DEBUG) def initialize(self, version) : ''' initialize database, catalog ''' self._setup_logging() # batch mode self.batch_mode = (self.batch_list or self.batch_bsr or self.batch_extract) # disable INFO level logging in batch mode if self.batch_mode and self.loglevel == logging.INFO : self.loglevel = logging.WARNING self.logger.setLevel(self.loglevel) self.logger.info('Populating file system ... 
') # setup cache if self.user_cache_path : self.cache_prefix = self.user_cache_path else : self.cache_prefix = tempfile.mkdtemp(prefix='baculafs-') self.cache_path = os.path.normpath(self.cache_prefix + '/files') makedirs(self.cache_path) self.cache_bsrpath = os.path.normpath(self.cache_prefix + '/bsr') makedirs(self.cache_bsrpath) self.cache_symlinks = os.path.normpath(self.cache_prefix + '/symlinks') makedirs(self.cache_symlinks) # test for old version (2.x) of bacula self.bsr_compat = int(version[0]) < 3 if self.bsr_compat : self.logger.debug('Detected old Bacula: %s' % version) # test access to sd conf file open(self.conf, 'r').close() # init bextract failure pattren self.fail_pattern = 'Mount Volume "([^"]+)" on device "%s" (.*) and press return when ready:' % self.device # init database and catalog self.db = Database(self.driver, self.host, self.port, self.database, self.username, self.password, self.logger) self.catalog = Catalog(self.db) self.base64 = Base64() files = self.catalog.query(self.client, self.fileset, self.datetime, self.recent_job, self.joblist) # validated values self.client = self.catalog.client self.fileset = self.catalog.fileset[1] self.datetime = self.catalog.datetime # we don't need the database anymore self.db.close() prefetches = [] difflist = {} # validate prefetch conditions if self.prefetch_everything : self.prefetch_recent = False self.prefetch_regex = None self.prefetch_diff = None self.prefetch_difflist = None self.prefetch_list = None self.prefetch_symlinks = True if self.prefetch_regex : try : regex = re.compile(self.prefetch_regex) self.prefetch_attrs = True except : # bad regex: show traceback and ignore self.logger.warning(traceback.format_exc()) self.prefetch_regex = None if self.prefetch_diff : self.prefetch_diff = os.path.normpath(os.path.expanduser(self.prefetch_diff)) try : if os.path.isdir(self.prefetch_diff) : self.prefetch_symlinks = True else : self.prefetch_diff = None except : # can't access target directory: show 
traceback and ignore self.logger.warning(traceback.format_exc()) self.prefetch_diff = None if self.prefetch_difflist : self.prefetch_difflist = os.path.normpath(os.path.expanduser(self.prefetch_difflist)) try : difflistfile = sys.stdin if self.prefetch_difflist == '-' else open(self.prefetch_difflist, 'rt') for line in difflistfile.readlines(): date = ' '.join(line.split()[:5]) difflist[line[(len(date) + 1):].strip()] = time.strptime(date, '%a %b %d %H:%M:%S %Y') difflistfile.close() self.prefetch_symlinks = True except : # can't access/parse difflist: show traceback and ignore self.logger.warning(traceback.format_exc()) self.prefetch_difflist = None if self.prefetch_list : self.prefetch_list = os.path.normpath(os.path.expanduser(self.prefetch_list)) try : listfile = sys.stdin if self.prefetch_list == '-' else open(self.prefetch_list, 'rt') matchlist = [line.strip() for line in listfile.readlines()] listfile.close() self.prefetch_symlinks = True except : # can't access/parse list: show traceback and ignore self.logger.warning(traceback.format_exc()) self.prefetch_list = None if self.prefetch_recent : self.prefetch_symlinks = True if self.prefetch_symlinks : self.prefetch_attrs = True if 'use_ino' in self.fuse_args.optlist: self.use_ino = True self.prefetch_attrs = True # must figure out max st_ino for file in files : head = file[0] tail = file[1] # handle windows directories if not head.startswith('/') : head = '/'+head # make file entry if self.prefetch_attrs : entry = file[2:] + self._bacula_stat(file[-2]) # find max st_ino if self.use_ino: if entry[-1].st_ino > self.max_ino : self.max_ino = entry[-1].st_ino # detemine if we need to prefetch this entry filepath = head + tail if (not stat.S_ISDIR(entry[-1].st_mode) and (self.prefetch_everything or (self.prefetch_recent and file[3] == self.catalog.most_recent_jobid) or (self.prefetch_regex and regex.search(filepath)) or (self.prefetch_diff and not self._match_stat(self.prefetch_diff + filepath, entry[-1])) or 
(self.prefetch_difflist and (filepath[1:] not in difflist or difflist[filepath[1:]][:-1] != time.localtime(entry[-1].st_mtime)[:-1])) or (self.prefetch_list and filepath in matchlist) or (self.prefetch_symlinks and stat.S_ISLNK(entry[-1].st_mode)))) : prefetches.append(filepath) else : entry = file[2:] + (None,) # stat info placeholder # new directory if head not in self.dirs : self.dirs[head] = {} # add parent directories self._add_parent_dirs(head) # directories are added to their parents if head != '/' and tail == '' : head, tail = self._split(head[:-1]) # and finally self.dirs[head][tail] = entry # fix st_ino if self.use_ino: self._update_inodes('/') npf = len(prefetches) if npf > 0 : self.logger.info('Prefetching %d objects ... ' % npf) self._extract(prefetches) self.logger.debug('Cache directory is: %s' % self.cache_prefix) self.joblist = ' '.join([str(job[0]) for job in self.catalog.jobs]) self.logger.debug('Job ids in file system: %s' % self.joblist) self.logger.info('BaculaFS ready (%d files).' % len(files)) self._initialized = True def shutdown(self) : ''' remove cache directory if required ''' if self.cleanup and not self.user_cache_path and self.cache_prefix : self.logger.info('removing cache directory: %s' % self.cache_prefix) shutil.rmtree(self.cache_prefix, ignore_errors = True) def setxattr(self, path, name, value, flags): ''' set value of extended attribute we allow only setting user.baculafs.bextract.state on the root directory ''' if (path == '/' and name == FileSystem.xattr_prefix + 'bextract.state' and value == 'run') : self._bextract_user_intervention_event.set() else : return -errno.EOPNOTSUPP def getxattr(self, path, name, size): ''' get value of extended attribute baculafs exposes some filesystem attributes for the root directory (e.g. joblist, cache_prefix - see FileSystem.xattr_fields_root) and several other attributes for each file/directory that appears in the catalog (e.g. 
MD5, JobId - see FileSystem.xattr_fields) ''' head, tail = self._split(path) val = None n = name.replace(FileSystem.xattr_prefix, '') if path == '/' : if n in FileSystem.xattr_fields_root : val = str(getattr(self, n)) elif n.startswith('bextract.') : n = n.replace('bextract.', '') if n in FileSystem.xattr_fields_bextract : val = str(self._bextract_get_status()[n]) if (not val and head in self.dirs and tail in self.dirs[head] and len(self.dirs[head][tail]) != 1 and n in FileSystem.xattr_fields) : val = str(self.dirs[head][tail][FileSystem.xattr_fields.index(n)]) if n == 'MD5' and val != '0': l = len(val) val = binascii.b2a_hex(binascii.a2b_base64(val+'='*((l*3+8)/3-l)+'\n')) # padding # attribute not found if val == None : return -errno.ENODATA # We are asked for size of the value. if size == 0: return len(val) return val def listxattr(self, path, size): ''' list extended attributes ''' head, tail = self._split(path) xattrs = [] if path == '/' : xattrs += [FileSystem.xattr_prefix + a for a in FileSystem.xattr_fields_root] xattrs += [FileSystem.xattr_prefix + 'bextract.' + a for a in FileSystem.xattr_fields_bextract] if (head in self.dirs and tail in self.dirs[head] and len(self.dirs[head][tail]) != 1) : xattrs += [FileSystem.xattr_prefix + a for a in FileSystem.xattr_fields] # We are asked for size of the attr list, ie. joint size of attrs # plus null separators. if size == 0: return len("".join(xattrs)) + len(xattrs) return xattrs def getattr(self, path): ''' Retrieve file attributes. Notes: 1) Bacula does not store attributes for parent directories that are not being explicitly backed up, so we provide a default set of attributes FileSystem.null_stat 2) file attributes are base64-encoded and stored by Bacula in the catalog. These attributes are decoded when first needed and then cached for subsequent requests. 
3) python fuse expects atime/ctime/mtime to be positive ''' head, tail = self._split(path) if head in self.dirs and tail in self.dirs[head] : self._getattr_lock.acquire() attrs = self.dirs[head][tail][-1] # decode and cache stat info if not attrs : self.dirs[head][tail] = self.dirs[head][tail][:-1] + self._bacula_stat(self.dirs[head][tail][-3]) attrs = self.dirs[head][tail][-1] # zero negative timestamps for a in ['st_atime','st_mtime','st_ctime'] : t = getattr(attrs, a) if t < 0 : self.logger.warning('%s has negative timestamp %s=%d, will use 0' % (path, a, t)) setattr(attrs, a, 0) self._getattr_lock.release() return attrs else: return -errno.ENOENT def readdir(self, path, offset): ''' read directory entries ''' path = path if path.endswith('/') else path+'/' for key in ['.','..'] : yield fuse.Direntry(key) for key in self.dirs[path].keys() : if len(key) > 0: if self.use_ino: bs = self.getattr(path + key) ino = bs.st_ino else : ino = 0 yield fuse.Direntry(key, ino=ino) def readlink(self, path): ''' read link contents ''' realpath = self._extract([path])[0] if realpath : link = os.readlink(realpath) if self.move_root and link.startswith('/') : link = os.path.normpath(self.fuse_args.mountpoint + link) return link return -errno.ENOENT class _File(object) : def __init__(self, fs, path, flags, *mode) : self.fs = fs accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR if (flags & accmode) != os.O_RDONLY: raise IOError(errno.EACCES, '') self.path = path self.realpath = fs._extract([path])[0] self.file = os.fdopen(os.open(self.realpath, flags, *mode), flag2mode(flags)) self.fd = self.file.fileno() self.direct_io = False self.keep_cache = True def read(self, length, offset): self.file.seek(offset) return self.file.read(length) def release(self, flags): self.file.close() def _bextract_version() : ''' return version string of bextract, return None if not runnable or version cannot be parsed ''' version = None try : child = pexpect.spawn('bextract -?') i = child.expect(['Version: 
([^(]*) \(([^)]*)\)', pexpect.EOF]) if i == 0 : version = '%s (%s)' % child.match.groups() child.close() except : pass return version def main(): usage = """ BaculaFS: exposes the Bacula catalog and storage as a Filesystem in USErspace """ + Fuse.fusage bacula_version = _bextract_version() server = FileSystem(version="BaculaFS version: %s\nbextract version: %s\nPython FUSE version: %s" % (__version__, bacula_version, fuse.__version__), usage=usage) server.multithreaded = True server.parser.add_option(mountopt="driver", choices=Database.drivers, metavar='|'.join(Database.drivers), default=server.driver, help="database driver [default: %default]") server.parser.add_option(mountopt="host", metavar="HOST", default=server.host, help="database server address [default: %default]") server.parser.add_option(mountopt="port", metavar="PORT", default=server.port, type="int", help="database server port") server.parser.add_option(mountopt="database", metavar="PATH", default=server.database, help="database name [default: bacula]") server.parser.add_option(mountopt="username", metavar="USERNAME", default=server.username, help="database user name [default: %default]") server.parser.add_option(mountopt="password", metavar="PASSWORD", default=server.password, help="database password (use '-o password= ' to get a password prompt; if not provided, the password is read from the DATABASE_PASSWORD environment variable)") server.parser.add_option(mountopt="conf", metavar="PATH", default=server.conf, help="storage daemon configuration file [default: %default]") server.parser.add_option(mountopt="client", metavar="CLIENT", default=server.client, help="file daemon name") server.parser.add_option(mountopt="fileset", metavar="FILESET", default=server.fileset, help="backup fileset") server.parser.add_option(mountopt="device", metavar="DEVICE", default=server.device, help="storage device name [default: %default]") server.parser.add_option(mountopt="datetime", metavar="'YYYY-MM-DD hh:mm:ss'", 
default=server.datetime, help="snapshot date/time [default: now]") server.parser.add_option(mountopt="recent_job", action="store_true", default=server.recent_job, help="select contents of most recent job only [default: %default]") server.parser.add_option(mountopt="joblist", metavar="'JOBID1 JOBID2 ...'", default=server.joblist, help="select contents of specified list of jobs") server.parser.add_option(mountopt="cleanup", action="store_true", default=server.cleanup, help="clean cache directory upon umount [default: %default]") server.parser.add_option(mountopt="move_root", action="store_true", default=server.move_root, help="make absolute path symlinks point to path under mount point [default: %default]") server.parser.add_option(mountopt="prefetch_attrs", action="store_true", default=server.prefetch_symlinks, help="read and parse attributes for all files upon filesystem initialization [default: %default]") server.parser.add_option(mountopt="prefetch_symlinks", action="store_true", default=server.prefetch_symlinks, help="extract all symbolic links upon filesystem initialization (implies prefetch_attrs) [default: %default]") server.parser.add_option(mountopt="prefetch_regex", metavar="REGEX", default=server.prefetch_regex, help="extract all objects that match REGEX upon filesystem initialization (implies prefetch_attrs)") server.parser.add_option(mountopt="prefetch_recent", action="store_true", default=server.prefetch_recent, help="extract contents of most recent non-full job upon filesystem initialization (implies prefetch_symlinks) [default: %default]") server.parser.add_option(mountopt="prefetch_diff", metavar="PATH", default=server.prefetch_diff, help="extract files that do not match files at PATH (hint: speeds up rsync; implies prefetch_symlinks)") server.parser.add_option(mountopt="prefetch_difflist", metavar="DIFFLIST", default=server.prefetch_difflist, help="extract files that do not match files in DIFFLIST (list line format: 'Day Mon DD hh:mm:ss YYYY PATH'; 
use '-' to read from standard input; hint: format matches output of 'duplicity list-current-files -v0 target_url'; implies prefetch_symlinks)") server.parser.add_option(mountopt="prefetch_list", metavar="LIST", default=server.prefetch_list, help="extract files that match files in LIST (list should contains one absolute file path per line; use '-' to read from standard input; implies prefetch_symlinks)") server.parser.add_option(mountopt="prefetch_everything", action="store_true", default=server.prefetch_everything, help="extract everything upon filesystem initialization (complete restore to cache) [default: %default]") server.parser.add_option(mountopt="batch_list", action="store_true", default=server.batch_list, help="list files to be prefetched and exit [default: %default]") server.parser.add_option(mountopt="batch_bsr", action="store_true", default=server.batch_bsr, help="dump contnets of bsr file for extracting prefetched files and exit [default: %default]") server.parser.add_option(mountopt="batch_extract", action="store_true", default=server.batch_extract, help="extract prefetched files to mount point and exit [default: %default]") server.parser.add_option(mountopt="user_cache_path", metavar="PATH", default=server.user_cache_path, help="user specified cache path (hint: combine this with one of the prefetch options) [default: %default]") server.parser.add_option(mountopt="logging", choices=LOGGING_LEVELS.keys(), metavar='|'.join(LOGGING_LEVELS.keys()), default=server.logging, help="logging level [default: %default]") server.parser.add_option(mountopt="syslog", action="store_true", default=server.syslog, help="log to both syslog and console [default: %default]") server.parse(values=server, errex=1) if server.fuse_args.mount_expected() : if not bacula_version : raise RuntimeError, 'cannot determine Bacula bextract version - is it installed?' else : # we initialize before main (i.e. 
not in fsinit) so that # any failure here aborts the mount try : server.initialize(bacula_version) except : server.shutdown() raise if not server.batch_mode : server.main() # we shutdown after main, i.e. not in fsshutdown, because # calling fsshutdown with multithreaded==True seems to cause # the python fuse process to hang waiting for the python gil if server.fuse_args.mount_expected() : server.shutdown()
PypiClean
/564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/extensions/tracing/opentelemetry.py
from __future__ import annotations import enum from copy import deepcopy from inspect import isawaitable from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, Optional from opentelemetry import trace from opentelemetry.trace import SpanKind from strawberry.extensions import SchemaExtension from strawberry.extensions.utils import get_path_from_info from .utils import should_skip_tracing if TYPE_CHECKING: from graphql import GraphQLResolveInfo from opentelemetry.trace import Span, Tracer from strawberry.types.execution import ExecutionContext DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" ArgFilter = Callable[[Dict[str, Any], "GraphQLResolveInfo"], Dict[str, Any]] class RequestStage(enum.Enum): REQUEST = enum.auto() PARSING = enum.auto() VALIDATION = enum.auto() class OpenTelemetryExtension(SchemaExtension): _arg_filter: Optional[ArgFilter] _span_holder: Dict[RequestStage, Span] = dict() _tracer: Tracer def __init__( self, *, execution_context: Optional[ExecutionContext] = None, arg_filter: Optional[ArgFilter] = None, ): self._arg_filter = arg_filter self._tracer = trace.get_tracer("strawberry") if execution_context: self.execution_context = execution_context def on_operation(self) -> Generator[None, None, None]: self._operation_name = self.execution_context.operation_name span_name = ( f"GraphQL Query: {self._operation_name}" if self._operation_name else "GraphQL Query" ) self._span_holder[RequestStage.REQUEST] = self._tracer.start_span( span_name, kind=SpanKind.SERVER ) self._span_holder[RequestStage.REQUEST].set_attribute("component", "graphql") if self.execution_context.query: self._span_holder[RequestStage.REQUEST].set_attribute( "query", self.execution_context.query ) yield # If the client doesn't provide an operation name then GraphQL will # execute the first operation in the query string. This might be a named # operation but we don't know until the parsing stage has finished. 
If # that's the case we want to update the span name so that we have a more # useful name in our trace. if not self._operation_name and self.execution_context.operation_name: span_name = f"GraphQL Query: {self.execution_context.operation_name}" self._span_holder[RequestStage.REQUEST].update_name(span_name) self._span_holder[RequestStage.REQUEST].end() def on_validate(self) -> Generator[None, None, None]: ctx = trace.set_span_in_context(self._span_holder[RequestStage.REQUEST]) self._span_holder[RequestStage.VALIDATION] = self._tracer.start_span( "GraphQL Validation", context=ctx, ) yield self._span_holder[RequestStage.VALIDATION].end() def on_parse(self) -> Generator[None, None, None]: ctx = trace.set_span_in_context(self._span_holder[RequestStage.REQUEST]) self._span_holder[RequestStage.PARSING] = self._tracer.start_span( "GraphQL Parsing", context=ctx ) yield self._span_holder[RequestStage.PARSING].end() def filter_resolver_args( self, args: Dict[str, Any], info: GraphQLResolveInfo ) -> Dict[str, Any]: if not self._arg_filter: return args return self._arg_filter(deepcopy(args), info) def add_tags( self, span: Span, info: GraphQLResolveInfo, kwargs: Dict[str, Any] ) -> None: graphql_path = ".".join(map(str, get_path_from_info(info))) span.set_attribute("component", "graphql") span.set_attribute("graphql.parentType", info.parent_type.name) span.set_attribute("graphql.path", graphql_path) if kwargs: filtered_kwargs = self.filter_resolver_args(kwargs, info) for kwarg, value in filtered_kwargs.items(): span.set_attribute(f"graphql.param.{kwarg}", value) async def resolve(self, _next, root, info, *args, **kwargs) -> Any: if should_skip_tracing(_next, info): result = _next(root, info, *args, **kwargs) if isawaitable(result): # pragma: no cover result = await result return result with self._tracer.start_as_current_span( f"GraphQL Resolving: {info.field_name}", context=trace.set_span_in_context(self._span_holder[RequestStage.REQUEST]), ) as span: self.add_tags(span, info, 
kwargs) result = _next(root, info, *args, **kwargs) if isawaitable(result): result = await result return result class OpenTelemetryExtensionSync(OpenTelemetryExtension): def resolve(self, _next, root, info, *args, **kwargs) -> Any: if should_skip_tracing(_next, info): result = _next(root, info, *args, **kwargs) return result with self._tracer.start_as_current_span( f"GraphQL Resolving: {info.field_name}", context=trace.set_span_in_context(self._span_holder[RequestStage.REQUEST]), ) as span: self.add_tags(span, info, kwargs) result = _next(root, info, *args, **kwargs) return result
PypiClean
/AltAnalyze-2.1.3.15.tar.gz/AltAnalyze-2.1.3.15/altanalyze/stats_scripts/preAligned.py
import sys,string,os,shutil
sys.path.insert(1, os.path.join(sys.path[0], '..')) ### import parent dir dependencies
from scipy import sparse, io
import numpy
import LineageProfilerIterate
import cluster_corr
import export
from import_scripts import ChromiumProcessing
import traceback

""" cellHarmony without alignment """
# NOTE: this module is Python 2 code (print statements, string module helpers,
# file.xreadlines()). Keep that in mind before modernizing.


def cellHarmony(species,platform,query_exp_file,exp_output, customMarkers=False,useMulti=False,fl=None,customLabels=None):
    """ Prepare pre-aligned result files in a pre-defined format for cellHarmony
    post-aligment differential and visualization analyses.

    Builds the cellHarmony output directory tree next to the query expression
    file, splits the combined expression matrix into reference and query files
    based on the cell-to-cluster annotation table, writes a Louvain-style
    CellClassification file plus a labels file, and finally hands the split
    files to LineageProfilerIterate for differential expression analysis.
    Returns True regardless of whether the downstream analysis succeeded.

    NOTE(review): customLabels is immediately overwritten from fl.Labels(),
    so fl must be a valid object exposing Labels()/setLabels() — the
    customLabels argument itself is ignored. Confirm this is intended.
    """
    customLabels = fl.Labels()
    reference_exp_file = customMarkers ### pre-formatted from Seurat or other outputs
    export_directory = os.path.abspath(os.path.join(query_exp_file, os.pardir))
    if 'ExpressionInput' in query_exp_file: ### Change to the root directory above ExpressionINput
        export_directory = os.path.abspath(os.path.join(export_directory, os.pardir))
    # dataset name = file name without extension and without the 'exp.' prefix
    dataset_name = string.replace(string.split(query_exp_file,'/')[-1][:-4],'exp.','')
    # Create the output directory tree; ignore errors if directories exist
    try: os.mkdir(export_directory+'/cellHarmony/')
    except: pass
    try: os.mkdir(export_directory+'/cellHarmony/CellClassification/')
    except: pass
    try: os.mkdir(export_directory+'/cellHarmony/OtherFiles/')
    except: pass

    ### Get the query and reference cells, dataset names
    refererence_cells, query_cells, reference_dataset, query_dataset = importCelltoClusterAnnotations(customLabels)

    ### Get the reference and query cells in their respective order
    ### copy and re-name the input expression file to the output cellHarmony directory
    if len(reference_dataset)>0 and len(query_dataset)>0:
        target_exp_dir = export_directory+'/cellHarmony/exp.'+reference_dataset+'__'+query_dataset+'-AllCells.txt'
    else:
        target_exp_dir = export_directory+'/cellHarmony/exp.cellHarmony-reference__Query-AllCells.txt'
        reference_dataset = 'cellHarmony-reference'
    shutil.copy(query_exp_file,target_exp_dir)

    ### filter and export the heatmap with just reference cells
    cell_cluster_order = simpleHeaderImport(reference_exp_file)
    filtered_reference_cells=[]
    filtered_query_cells_db={}
    filtered_query_cells=[]
    representative_refcluster_cell = {}
    # Walk the reference file's column order so both exports preserve the
    # original clustering order of the cells
    for cell_id in cell_cluster_order:
        if cell_id in refererence_cells:
            filtered_reference_cells.append(cell_id)
            cluster_label = refererence_cells[cell_id].Label() ### Identifies where to place each query cell
            try: representative_refcluster_cell[cluster_label].append(cell_id)
            except: representative_refcluster_cell[cluster_label] = [cell_id]
        elif cell_id in query_cells:
            filtered_query_cells_db[cell_id]=query_cells[cell_id]
            filtered_query_cells.append(cell_id)

    #reference_output_file = export.findParentDir(reference_exp_file)+'/'+reference_dataset+'.txt'
    reference_output_file = export_directory+'/cellHarmony/OtherFiles/'+reference_dataset+'.txt'
    reference_output_file2 = export_directory+'/cellHarmony/exp.'+reference_dataset+'__'+query_dataset+'-Reference.txt'
    query_output_file =export_directory+'/'+query_dataset+'.txt'

    ### Write out separate refernece and query files
    from import_scripts import sampleIndexSelection
    # 'row_clusters-flat' is prepended so the cluster-assignment column from
    # the reference matrix is retained in the filtered reference output
    sampleIndexSelection.filterFile(reference_exp_file,reference_output_file,['row_clusters-flat']+filtered_reference_cells,force=True)
    sampleIndexSelection.filterFile(target_exp_dir,query_output_file,filtered_query_cells,force=True)
    shutil.copy(reference_output_file,reference_output_file2)

    ### export the CellClassification file
    output_classification_file = export_directory+'/cellHarmony/CellClassification/CellClassification.txt'
    exportCellClassifications(output_classification_file,filtered_query_cells_db,filtered_query_cells,representative_refcluster_cell)
    labels_file = export_directory+'/labels.txt'
    exportLabels(labels_file,filtered_reference_cells,refererence_cells)
    fl.setLabels(labels_file)
    print 'Files formatted for cellHarmony... running differential expression analyses'
    try:
        print reference_output_file
        print query_output_file
        print output_classification_file
        LineageProfilerIterate.harmonizeClassifiedSamples(species, reference_output_file, query_output_file, output_classification_file,fl=fl)
    except:
        # Deliberate best-effort: report the failure but still return True so
        # the file-formatting step is not considered a failure by the caller
        print '\nFAILED TO COMPLETE THE FULL CELLHARMONY ANALYSIS (SEE LOG FILE)...'
        print traceback.format_exc()
    return True


def exportCellClassifications(output_file,query_cells,filtered_query_cells,representative_refcluster_cell):
    """ Match the Louvain cellHarmony export format for the classification file.

    Writes one tab-delimited row per query barcode. The 'Ref Barcode' column
    is the last reference cell recorded for the query cell's cluster label,
    and the correlation is hard-coded to '1.0' (no alignment was performed).
    """
    header = 'Query Barcode\tRef Barcode\tCorrelation\tQuery Partition\tRef Partition\tLabel\n'
    o = open(output_file,'w')
    o.write(header)
    for query_barcode in filtered_query_cells:
        CI = query_cells[query_barcode]
        cluster_number = CI.ClusterNumber()
        label = CI.Label()
        ref_barcode = representative_refcluster_cell[label][-1]
        values = [query_barcode,ref_barcode,'1.0',cluster_number,cluster_number,label]
        o.write(string.join(values,'\t')+'\n')
    o.close()


def exportLabels(labels_file,filtered_reference_cells,refererence_cells):
    """ Write a tab-delimited labels file: cell_id, cluster number, label —
    one row per reference cell, in heatmap column order. """
    l = open(labels_file,'w')
    for cell_id in filtered_reference_cells:
        CI = refererence_cells[cell_id]
        cluster_number = CI.ClusterNumber()
        label = CI.Label()
        values = [cell_id,cluster_number,label]
        l.write(string.join(values,'\t')+'\n')
    l.close()


def simpleHeaderImport(filename):
    """ Return the cell IDs from the header (first line) of a tab- or
    comma-delimited expression file, skipping the first two columns.

    If a header entry contains ':' only the text after the last ':' is kept
    (strips cluster prefixes of the form 'cluster:cell_id').
    """
    for line in open(filename,'rU').xreadlines():
        data = cleanUpLine(line)
        if '\t' in data:
            t = string.split(data,'\t')
        else:
            t = string.split(data,',')
        header = t[2:]  # assumes the first two columns are non-cell columns — TODO confirm
        header2 = []
        for h in header:
            if ":" in h:
                h = string.split(h,':')[-1]
            header2.append(h)
        break  # only the first (header) line is needed
    return header2


class CellInfo:
    """ Lightweight record for one cell: barcode, cluster, dataset and label. """
    def __init__(self,cell_id, cluster_number, dataset_name, dataset_type, label):
        self.cell_id = cell_id; self.cluster_number = cluster_number; self.dataset_name = dataset_name
        self.dataset_type = dataset_type; self.label = label
    def CellID(self): return self.cell_id
    def ClusterNumber(self): return self.cluster_number
    def DatasetName(self): return self.dataset_name
    def DataSetType(self): return self.dataset_type
    def Label(self): return self.label
    def __repr__(self): return self.CellID()+'|'+self.Label()+'|'+self.DataSetType()


def importCelltoClusterAnnotations(filename):
    """ Parse the user-supplied cell-to-cluster annotation table (tab- or
    comma-delimited; columns: cell_id, cluster_number, optional cluster_name,
    dataset_name, dataset_type).

    Rows whose dataset_type begins with 'r' (case-insensitive) are treated as
    Reference cells; all others as Query cells. Returns
    (reference_cells, query_cells, reference_dataset, query_dataset) where the
    first two are dicts of cell_id -> CellInfo.

    NOTE(review): reference_dataset/query_dataset are only assigned while
    rows of that type are seen — a file with no reference (or no query) rows
    raises UnboundLocalError at the return. Verify inputs always contain both.
    """
    firstRow = True
    refererence_cells={}
    query_cells={}
    for line in open(filename,'rU').xreadlines():
        data = cleanUpLine(line)
        if '\t' in data:
            t = string.split(data,'\t')
        else:
            t = string.split(data,',')
        if firstRow:
            # Resolve column indices from the header row
            ci = t.index('cell_id')
            cn = t.index('cluster_number')
            try: cm = t.index('cluster_name')
            except: cm = False  # cluster_name column is optional
            dn = t.index('dataset_name')
            dt = t.index('dataset_type')
            firstRow = False
        else:
            cell_id = t[ci]
            cluster_number = t[cn]
            dataset_name = t[dn]
            dataset_type = t[dt]
            if cm != False:
                cluster_name = t[cm]
                label = cluster_name + '_c'+cluster_number
            else:
                label = 'c'+cluster_number
            if string.lower(dataset_type)[0] == 'r':
                dataset_type = 'Reference'
                reference_dataset = dataset_name
                CI = CellInfo(cell_id, cluster_number, dataset_name, dataset_type, label)
                refererence_cells[cell_id]=CI
            else:
                dataset_type = 'Query'
                query_dataset = dataset_name
                CI = CellInfo(cell_id, cluster_number, dataset_name, dataset_type, label)
                query_cells[cell_id]=CI
    return refererence_cells, query_cells, reference_dataset, query_dataset


def cleanUpLine(line):
    """ Strip newline/carriage-return characters and double quotes from a raw
    input line. The '\c' replacement targets a literal backslash-c sequence
    (not an escape — Python leaves unknown escapes as-is). """
    line = string.replace(line,'\n','')
    line = string.replace(line,'\c','')
    data = string.replace(line,'\r','')
    data = string.replace(data,'"','')
    return data


if __name__ == '__main__':
    # NOTE(review): genome, args and labels are not defined in this module —
    # running this file directly raises NameError. Presumably this block is a
    # template meant to be driven by an external argument parser; confirm.
    platform = 'RNASeq'
    cellHarmony(genome,platform,args.query_h5,None, customMarkers=args.reference_h5,useMulti=False,fl=None,customLabels=labels)
PypiClean
/Nasse-1.1-py3-none-any.whl/nasse/utils/logging.py
import datetime
import inspect
import linecache

import flask

from nasse import config

# Module-level recording state: while RECORDING is True, every emitted log
# line is appended to LOG_STACK and every intercepted call frame to
# CALL_STACK (see the Record context manager at the bottom of this module).
RECORDING = False
LOG_STACK = []
CALL_STACK = []


class Colors:
    """ANSI escape sequences used to colorize console log output."""
    normal = '\033[0m'
    grey = '\033[90m'
    red = '\033[91m'
    green = '\033[92m'
    blue = '\033[94m'
    cyan = '\033[96m'
    white = '\033[97m'
    yellow = '\033[93m'
    magenta = '\033[95m'

    # Set of every sequence above, used by write_log to strip colors before
    # persisting a line.
    _colors = {normal, grey, red, green, blue, cyan, white, yellow, magenta}


class LogLevel():
    """A named severity with a console template.

    The _draw_* flags cache which placeholders the template actually uses so
    that log() only computes the values it needs.
    """

    def __init__(self, level: str, template: str, debug: bool = False) -> None:
        self.level = str(level)
        self.template = str(template)
        # Debug-only levels are printed to the console only when
        # config.Mode.DEBUG is enabled (they are always written to the file).
        self.debug = bool(debug)
        self._draw_time = "{time}" in self.template
        self._draw_name = "{name}" in self.template
        self._draw_step = "{step}" in self.template
        self._draw_message = "{message}" in self.template

    def __repr__(self) -> str:
        return "<LogLevel: {level}>".format(level=self.level)


class LogLevels:
    """Container of the predefined log levels."""
    INFO = LogLevel(level="Info", template=Colors.grey + "{time}|" + Colors.normal +
                    "[INFO] ({name}) [{step}] {message}")
    DEBUG = LogLevel(debug=True, level="Debug", template=Colors.grey + "{time}|" + Colors.normal +
                     "[DEBUG] ({name}) [{step}] {message}")
    WARNING = LogLevel(level="Warning", template=Colors.grey + "{time}|" + Colors.normal +
                       "[WARNING] ({name}) [{step}] " + Colors.yellow + "{message}" + Colors.normal)
    ERROR = LogLevel(level="Error", template=Colors.grey + "{time}|" + Colors.normal +
                     "[ERROR] ({name}) [{step}] " + Colors.red + "!! {message} !!" + Colors.normal)

    def __repr__(self) -> str:
        return "<LogLevels Container>"


class StackFrame():
    """Snapshot of an interpreter frame captured by the call-stack tracer.

    Source lines are resolved lazily through linecache so that merely
    recording a frame stays cheap.
    """

    def __init__(self, frame) -> None:
        self.name = frame.f_code.co_name
        self.filename = frame.f_code.co_filename
        self.lineno = frame.f_lineno
        self.back_frame = frame.f_back
        self._line = None
        self._calling_line = None

    def __repr__(self) -> str:
        # FIX: the format string contained a literal "(unknown)" while still
        # passing filename= to .format(); the placeholder is restored so the
        # argument is actually used.
        return "<NasseStackFrame '{name}' in {filename} on {line_number}>".format(
            name=self.name, filename=self.filename, line_number=self.lineno)

    @property
    def line(self):
        """The source line the frame was executing (lazily read)."""
        if self._line is None:
            self._line = linecache.getline(self.filename, self.lineno)
        return self._line.strip()

    @property
    def calling_line(self):
        """The source line in the caller's frame (lazily read)."""
        if self._calling_line is None:
            self._calling_line = linecache.getline(
                self.back_frame.f_code.co_filename, self.back_frame.f_lineno)
        return self._calling_line.strip()

    def as_dict(self) -> dict:
        """JSON-friendly representation of the frame and its caller."""
        return {
            "name": self.name,
            "filename": self.filename,
            "lineNumber": self.lineno,
            # FIX: same unused-filename issue as __repr__; placeholder restored.
            "calledBy": "<{name}>, in {filename} at line {line_number}".format(
                name=self.back_frame.f_code.co_name,
                filename=self.back_frame.f_code.co_filename,
                line_number=self.back_frame.f_code.co_firstlineno)
        }


def add_to_call_stack(frame, event, arg):
    """
    Internal function to add a call to the call stack

    Shaped as a sys.settrace-compatible trace function: only 'call' events are
    recorded, and unless FULL_DEBUG is on, only frames from files under the
    application's base directory.
    """
    if RECORDING and event == "call":
        if config.Mode.FULL_DEBUG:
            CALL_STACK.append(StackFrame(frame))
        elif frame.f_code.co_filename.startswith(str(config.General.BASE_DIR)):
            CALL_STACK.append(StackFrame(frame))
    # Returning None disables per-line tracing inside the called frame.
    return None


def _app_identity():
    """Resolve the current app's display name and file-safe id.

    Prefers the app attached to the current Flask request, falling back to
    the global config. Extracted because this logic was duplicated verbatim
    in clear_log() and write_log().
    """
    try:
        name = flask.g.request.app.name
    except Exception:
        name = config.General.NAME
    try:
        app_id = flask.g.request.app.id
    except Exception:
        # Derive an id by keeping only letters/digits of the name, lowercased.
        app_id = "".join(l for l in str(name) if l.isalpha() or l.isdecimal()).lower()
    return name, app_id


def clear_log():
    """Truncate the app's log file and write a fresh header."""
    name, app_id = _app_identity()
    with open(config.General.BASE_DIR / "{id}.nasse.log".format(id=app_id), "w", encoding="utf8") as out:
        out.write("-- {name} DEBUG LOG --\n\n".format(name=str(name).upper()))


def write_log(new_line: str):
    """Writing out the log, wether it's to the log stack or the log file"""
    # new_line = str(new_line).replace("\n", " ")
    new_line = str(new_line)
    # Strip ANSI color sequences: they are for console output only.
    for color in Colors._colors:
        new_line = new_line.replace(color, "")
    if RECORDING:
        LOG_STACK.append(new_line)
    if config.Mode.DEBUG:
        name, app_id = _app_identity()
        with open(config.General.BASE_DIR / "{id}.nasse.log".format(id=app_id), "a", encoding="utf8") as out:
            out.write(str(new_line) + "\n")


def caller_name(skip: int = 2):
    """
    https://stackoverflow.com/a/9812105/11557354

    Get a name of a caller in the format module.class.method

    `skip` specifies how many levels of stack to skip while getting caller
    name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.

    An empty string is returned if skipped levels exceed stack height
    """
    stack = inspect.stack()
    start = 0 + skip
    if len(stack) < start + 1:
        return ''
    parentframe = stack[start][0]

    name = []
    module = inspect.getmodule(parentframe)
    if module:
        name.append(module.__name__)
    if 'self' in parentframe.f_locals:
        name.append(parentframe.f_locals['self'].__class__.__name__)
    codename = parentframe.f_code.co_name
    if codename != '<module>':  # top level usually
        name.append(codename)  # function or a method

    del parentframe, stack
    return ".".join(name)


def log(message: str = "Log", level: LogLevel = LogLevels.DEBUG, step: str = None):
    """Record a message: always to the log file/stack, and to the console
    when the level is not debug-only (or debug mode is active).

    No-op in production mode.
    """
    if config.Mode.PRODUCTION:
        return
    now = datetime.datetime.now()
    # The persisted line always uses a raw timestamp, independent of the
    # console template.
    write_log("{time}|[{level}] [{step}] {message}".format(
        time=now.timestamp(),
        level=level.level.upper(),
        step=(step if step is not None else (caller_name() if config.Mode.DEBUG else 'app')),
        message=message))
    if not level.debug or config.Mode.DEBUG:
        # Only compute the placeholders the level's template actually uses.
        formatting = {}
        if level._draw_time:
            formatting["time"] = config.General.LOGGING_TIME_FORMAT(now) if callable(
                config.General.LOGGING_TIME_FORMAT) else now.strftime(str(config.General.LOGGING_TIME_FORMAT))
        if level._draw_step:
            formatting["step"] = step if step is not None else (
                caller_name() if config.Mode.DEBUG else 'Nasse App')
        if level._draw_name:
            formatting["name"] = _app_identity()[0]
        if level._draw_message:
            formatting["message"] = message
        print(level.template.format(**formatting))


class Record():
    """Context manager that captures the call stack and log lines emitted
    while it is active.

    Usage::

        with Record() as rec:
            ...
        rec.CALL_STACK, rec.LOG_STACK
    """

    _call_stack = []
    _log_stack = []

    @property
    def CALL_STACK(self):
        # While recording, expose the live global stack; afterwards, the
        # snapshot taken by stop().
        if RECORDING:
            return CALL_STACK.copy()
        return self._call_stack.copy()

    @property
    def LOG_STACK(self):
        if RECORDING:
            return LOG_STACK.copy()
        return self._log_stack.copy()

    def start(self):
        """Clear the global stacks and begin recording."""
        global RECORDING
        CALL_STACK.clear()
        LOG_STACK.clear()
        RECORDING = True

    def stop(self):
        """Stop recording, snapshot and clear the global stacks.

        Returns the (call_stack, log_stack) snapshot.
        """
        global RECORDING
        self._call_stack = CALL_STACK.copy()
        self._log_stack = LOG_STACK.copy()
        RECORDING = False
        CALL_STACK.clear()
        LOG_STACK.clear()
        return self._call_stack, self._log_stack

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        self.stop()
PypiClean
/Mopidy-Transistor-0.2.0.tar.gz/Mopidy-Transistor-0.2.0/mopidy_transistor/library.py
import mopidy
import logging
from urllib.request import urlopen
import ssl
import certifi
from pathlib import Path
import podcastparser
import threading

from mopidy.internal import storage as internal_storage

logger = logging.getLogger(__name__)


class Library(object):
    """Persistent radio/podcast library backed by a JSON file.

    On first use the JSON file is seeded with a default set of radio banks
    and podcast feeds; afterwards the file is the source of truth.
    """

    def __init__(self, json_file, podcast_timeout=5.0):
        self._json_file = Path(json_file)
        # Per-feed network timeout (seconds) used when refreshing podcasts.
        self._podcast_timeout = podcast_timeout
        self.load()

    def save(self):
        """Write the in-memory library data back to the JSON file."""
        internal_storage.dump(self._json_file, self.data)

    def load(self):
        """Load the library from disk, creating a default one if missing."""
        if not self._json_file.is_file():
            self.data = {
                "version": mopidy.__version__,
                "radio_banks": {
                    "AM": [
                        {
                            "name": "FIP",
                            "stream_url": "http://direct.fipradio.fr/live/fip-midfi.mp3",
                            "position": 64,
                        },
                        {
                            "name": "Meeeuh",
                            "stream_url": "http://radiomeuh.ice.infomaniak.ch/radiomeuh-128.mp3",
                            "position": 32,
                        },
                    ],
                    "FM": [
                        {
                            "name": "Inter",
                            "stream_url": "http://direct.franceinter.fr/live/franceinter-midfi.mp3",
                            "position": 32,
                        },
                        {
                            "name": "Culture",
                            "stream_url": "http://direct.franceculture.fr/live/franceculture-midfi.mp3",
                            "position": 64,
                        },
                    ],
                },
                "podcasts": [
                    {
                        "name": "TEDx",
                        "feed_url": "http://www.npr.org/rss/podcast.php?id=510298",
                        "episodes": [],
                        "position": 64,
                    },
                    {
                        "name": "Revolt",
                        "feed_url": "http://wordsmith.podomatic.com/rss2.xml",
                        "episodes": [],
                        "position": 32,
                    },
                    {
                        "name": "Neo Geo",
                        "feed_url": "http://feeds.feedburner.com/NeoGeoNova",
                        "position": 10,
                    },
                    {
                        "name": "Juke Box",
                        "feed_url": "http://radiofrance-podcast.net/podcast09/rss_16999.xml",
                        "position": 20,
                    },
                ],
            }
            self.save()

        self.data = internal_storage.load(self._json_file)

    def update_podcasts(self):
        """Refresh every podcast's episode list from its RSS feed.

        Runs in a background thread so the caller is not blocked by network
        I/O. FIX: errors are now handled per feed instead of wrapping the
        whole loop — previously a single unreachable feed aborted the refresh
        of all remaining podcasts and skipped saving entirely.
        """

        def run():
            for podcast in self.data["podcasts"]:
                try:
                    raw = urlopen(
                        podcast["feed_url"],
                        timeout=self._podcast_timeout,
                        context=ssl.create_default_context(
                            cafile=certifi.where()
                        ),
                    )
                    parsed = podcastparser.parse(podcast["feed_url"], raw)

                    episodes = parsed["episodes"]
                    podcast["episodes"] = []
                    for episode in episodes:
                        title = episode["title"]
                        media_url = episode["enclosures"][0]["url"]
                        # podcast['episodes'].append({"title":unicodedata.normalize('NFKD', title).encode('ascii','ignore'), "url":media_url})
                        podcast["episodes"].append(
                            {"title": title, "url": media_url}
                        )
                except Exception as e:
                    logger.error(
                        "Transistor: Can't retrieve podcast data: {}".format(
                            str(e)
                        )
                    )

            # Persist whatever was successfully refreshed.
            self.save()
            logger.info(
                "Transistor Library: done downloading podcasts infos"
            )

        thr = threading.Thread(target=run)
        thr.start()
PypiClean
/Gammalearn-0.11.0.tar.gz/Gammalearn-0.11.0/gammalearn/data/example_settings/experiment_settings_square_pixels.py
import collections
import os
import importlib
from pathlib import Path
import math
import numpy as np
import torch
from torch.optim import lr_scheduler
from torchvision.models.mobilenet import mobilenet_v2
from torchmetrics.classification import Accuracy, AUROC
from pytorch_lightning.profiler import SimpleProfiler, AdvancedProfiler, PyTorchProfiler
import gammalearn.criterions as criterions
import gammalearn.optimizers as optimizers
import gammalearn.steps as steps
from gammalearn.callbacks import (LogGradientNorm, LogModelWeightNorm, LogModelParameters,
                                  LogUncertaintyLogVars, LogUncertaintyPrecisions, LogGradNormWeights,
                                  LogReLUActivations, LogLinearGradient, LogFeatures, WriteDL2Files)
import gammalearn.utils as utils
import gammalearn.datasets as dsets
from gammalearn.data_handlers import GLearnDataModule
from gammalearn.constants import GAMMA_ID, PROTON_ID, ELECTRON_ID
import gammalearn.data.nets as nets

# Experiment settings
main_directory = str(Path.home()) + '/gammalearn_experiments'  # TODO change directory if needed
"""str: mandatory, where the experiments are stored"""
experiment_name = 'test_install'
"""str: mandatory, the name of the experiment. Should be different
for each experiment, except if one wants to resume an old experiment
"""
info = ''
"""str: optional"""
gpus = 1
"""int or list: mandatory, the number of gpus to use. If -1, run on all GPUS,
if None/0 run on CPU. If list, run on GPUS of list.
"""
log_every_n_steps = 3
"""int: optional, the interval in term of iterations for on screen
data printing during experiment. A small value may lead to a very large log file size.
"""
window_size = 100
"""int: optional, the interval in term of stored values for metric moving computation"""
checkpointing_options = dict(every_n_epochs=1, save_top_k=-1, save_last=True)
"""dict: optional, specific options for model checkpointing.
See https://pytorch-lightning.readthedocs.io/en/stable/api/pytorch_lightning.callbacks.ModelCheckpoint.html
for details.
"""
random_seed = 1
"""int: optional, the manual seed to make experiments more reproducible"""
monitor_device = True
"""bool: optional, whether or not monitoring the gpu utilization"""
particle_dict = {GAMMA_ID: 0,
                 PROTON_ID: 1,
                 # ELECTRON_ID: 2,
                 }
"""particle_dict is mandatory and maps cta particle types with class id. e.g. gamma (0) is class 0"""
targets = collections.OrderedDict({
    'energy': {
        'output_shape': 1,
        'loss': torch.nn.L1Loss(reduction='none'),
        'loss_weight': 1,
        'metrics': {
            # 'functions': ,
        },
        'mt_balancing': True
    },
    'impact': {
        'output_shape': 2,
        'loss': torch.nn.L1Loss(reduction='none'),
        'loss_weight': 1,
        'metrics': {},
        'mt_balancing': True
    },
    'direction': {
        'output_shape': 2,
        'loss': torch.nn.L1Loss(reduction='none'),
        'loss_weight': 1,
        'metrics': {},
        'mt_balancing': True
    },
    'class': {
        'label_shape': 1,
        'output_shape': len(particle_dict),
        'loss': torch.nn.CrossEntropyLoss(),
        'loss_weight': 1,
        'metrics': {
            'Accuracy_particle': Accuracy(threshold=0.5),
            'AUC_particle': AUROC(pos_label=particle_dict[GAMMA_ID],
                                  num_classes=len(particle_dict),
                                  compute_on_step=True
                                  )
        },
        'mt_balancing': True
    }
})
"""dict: mandatory, defines for every objectives of the experiment
the loss function and its weight
"""
dataset_class = dsets.MemoryLSTDataset
# dataset_class = dsets.FileLSTDataset
"""Dataset: mandatory, the Dataset class to load the data. Currently 2 classes are available, MemoryLSTDataset that
loads images in memory, and FileLSTDataset that loads images from files during training.
"""
dataset_parameters = {
    'camera_type': 'LST_LSTCam',
    'group_by': 'image',
    'use_time': True,
    'particle_dict': particle_dict,
    'targets': list(targets.keys()),
    # 'subarray': [1],
}
"""dict: mandatory, the parameters of the dataset.
camera_type is mandatory and can be:
'LST_LSTCam', 'MST_NectarCam', 'MST_FlashCam', 'SST_ASTRICam', 'SST1M_DigiCam', 'SST_CHEC', 'MST-SCT_SCTCam'.
group_by is mandatory and can be 'image', 'event_all_tels', 'event_triggered_tels'.
particle_dict is mandatory and maps cta particle types with class id.
e.g. gamma (0) is class 0, proton (101) is class 1 and electron (1) is class 2.
use_time (optional): whether or not to use time information
subarray (optional): the list of telescope ids to select as a subarray
"""
preprocessing_workers = 4
"""int: optional, the max number of workers to create dataset."""
dataloader_workers = 4
"""int: optional, the max number of workers for the data loaders. If 0, data are loaded from the main thread."""
mp_start_method = 'fork'
"""str: optional, the method to start new process in [fork, spawn]"""

# Net settings
# Uncomment following lines to import your network from an external file
# net_definition_file = utils.nets_definition_path()
# """str: mandatory, the file where to find the net definition to use"""
# # Load the network definitions module #
# spec = importlib.util.spec_from_file_location("nets", net_definition_file)
# nets = importlib.util.module_from_spec(spec)
# spec.loader.exec_module(nets)

net_parameters_dic = {
    'model': nets.GammaPhysNet,
    'parameters': {
        'backbone': {
            'model': nets.TorchConvNet,
            'parameters': {
                'model': mobilenet_v2,
                'parameters': {
                    'num_channels': 2,
                    'output_size': (14, 14),
                    'pretrained': False,
                }
            }
        },
        'fc_width': 256,
        'non_linearity': torch.nn.ReLU,
        'last_bias_init': None,
        # one output head per target, sized from the targets dict above
        'targets': {k: v.get('output_shape', 0) for k, v in targets.items()}
    }
}
"""dict: mandatory, the parameters of the network. Depends on the network chosen.
Must include at least a model and a parameters field.
"""
# checkpoint_path = main_directory + '/test_install/checkpoint_epoch=1.ckpt'
"""str: optional, the path where to find the backup of the model to resume"""
profiler = None
# profiler = {'profiler': SimpleProfiler,
#             'options': dict(extended=True)
#             }
"""str: optional, the profiler to use"""

######################################################################################################################
train = True
"""bool: mandatory, whether or not to train the model"""
# Data settings
data_module_train = {
    'module': GLearnDataModule,
    'paths': [
        Path(__file__).parent.absolute().joinpath('../../../share/data/MC_data').resolve().as_posix(),
    ],  # TODO fill your folder path
    'image_filter': {
        # utils.intensity_filter: {'intensity': [50, np.inf]},
        # utils.cleaning_filter: {'picture_thresh': 6, 'boundary_thresh': 3,
        #                         'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
        # utils.leakage_filter: {'leakage2_cut': 0.2, 'picture_thresh': 6, 'boundary_thresh': 3,
        #                        'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
    },
    'event_filter': {
        # utils.energyband_filter: {'energy': [0.02, 2], 'filter_only_gammas': True},  # in TeV
        # utils.emission_cone_filter: {'max_angle': 0.0698},
        # utils.impact_distance_filter: {'max_distance': 200},
        # utils.telescope_multiplicity_filter: {'multiplicity': 2},
    },
    'transform': dsets.ResampleImage('bilinear_interpolation', (55, 55, 1)),
    'target_transform': None
}
"""paths->list: mandatory, the folders where to find the hdf5 data files"""
"""image_filter->dict: optional, the filter(s) to apply to the dataset at image level"""
"""event_filter->dict: optional, the filter(s) to apply to the dataset"""

validating_ratio = 0.2
"""float: mandatory, the ratio of data to create the validating set"""
max_epochs = 2
"""int: mandatory, the maximum number of epochs for the experiment"""
batch_size = 4
"""int: mandatory, the size of the mini-batch"""
# train_files_max_number = 1
"""int: optional, the max number of files to use for
the dataset"""
pin_memory = True
"""bool: optional, whether or not to pin memory in dataloader"""

# Training settings
loss_options = {
    'conditional': True,
    'gamma_class': dataset_parameters['particle_dict'][0],
}
"""dict: options forwarded to the loss computation; gamma_class is the class
id mapped to the gamma particle in particle_dict"""
loss_balancing_options = {
    'logvar_coeff': [2, 2, 2, 0.5],  # for uncertainty
    'penalty': 0,  # for uncertainty
}
"""dict: mandatory, parameters of the multi-task loss balancing
(uncertainty weighting), passed to criterions.MultilossBalancing below
"""
loss_balancing = criterions.MultilossBalancing(targets, **loss_balancing_options)
"""function: mandatory, the function to compute the loss"""
optimizer_dic = {
    'network': optimizers.load_sgd,
    'loss_balancing': optimizers.load_adam
}
"""dict: mandatory, the optimizers to use for the experiment.
One may want to use several optimizers in case of GAN for example
"""
optimizer_parameters = {
    'network': {'lr': 1e-4,
                'weight_decay': 1e-7,
                'momentum': 0.9,
                'nesterov': True
                },
    'loss_balancing': {'lr': 0.025,
                       'weight_decay': 1e-4,
                       },
}
"""dict: mandatory, defines the parameters for every optimizers to use"""
# regularization = {'function': 'gradient_penalty',
#                   'weight': 10}
"""dict: optional, regularization to use during the training process. See in optimizers.py for
available regularization functions.
If `function` is set to 'gradient_penalty', the training step must be `training_step_mt_gradient_penalty`."""
experiment_hparams = {
    'add_pointing': False
}
"""dict: hyper-parameters shared by the training/eval/test step factories below"""
training_step = steps.get_training_step_mt(**experiment_hparams)
# training_step = steps.training_step_gradnorm
# training_step = steps.training_step_mt_gradient_penalty
"""function: mandatory, the function to compute the training step"""
eval_step = steps.get_eval_step_mt(**experiment_hparams)
"""function: mandatory, the function to compute the validating step"""
check_val_every_n_epoch = 1
"""int: optional, the interval in term of epoch for validating the model"""
lr_schedulers = {
    'network': {
        lr_scheduler.StepLR: {
            'gamma': 0.1,
            'step_size': 10,
        }
    },
    # 'network': {
    #     lr_scheduler.ReduceLROnPlateau: {
    #         'factor': 0.1,
    #         'patience': 30,
    #     }
    # },
    # 'network': {
    #     lr_scheduler.MultiStepLR: {
    #         'gamma': 0.1,
    #         'milestones': [10, 15, 18],
    #     }
    # },
    # 'network': {
    #     lr_scheduler.ExponentialLR: {
    #         'gamma': 0.9,
    #     }
    # },
}
"""dict: optional, defines the learning rate schedulers"""
# callbacks
training_callbacks = [
    LogGradientNorm(),
    LogModelWeightNorm(),
    LogModelParameters(),
    LogUncertaintyLogVars(),
    LogUncertaintyPrecisions(),
    # LogGradNormWeights(),
    LogReLUActivations(),
    LogLinearGradient(),
    # LogFeatures(),  # Do not use during training !! Very costly !!
]
"""dict: list of callbacks
"""

######################################################################################################################
# Testing settings
test = True
"""bool: mandatory, whether or not to test the model at the end of training"""
merge_test_datasets = False
"""bool: optional, whether or not to merge test datasets"""
data_module_test = {
    'module': GLearnDataModule,
    'paths': [
        Path(__file__).parent.absolute().joinpath('../../../share/data/MC_data').resolve().as_posix(),
    ],
    'image_filter': {
        utils.intensity_filter: {'intensity': [10, np.inf]},
        # # utils.cleaning_filter: {'picture_thresh': 6, 'boundary_thresh': 3,
        # #                         'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
        # utils.leakage_filter: {'leakage2_cut': 0.2, 'picture_thresh': 6, 'boundary_thresh': 3,
        #                        'keep_isolated_pixels': False, 'min_number_picture_neighbors': 2},
    },
    'event_filter': {
        # utils.energyband_filter: {'energy': [0.02, 2], 'filter_only_gammas': True},  # in TeV
        # utils.emission_cone_filter: {'max_angle': 0.0698},
        # utils.impact_distance_filter: {'max_distance': 200},
        # utils.telescope_multiplicity_filter: {'multiplicity': 2},
    },
    'transform': dsets.ResampleImage('bilinear_interpolation', (55, 55, 1)),
    'target_transform': None
}
""" dict: optional, must at least contain a non-empty 'source':{'paths:[]'}
path->list of str: optional, the folders containing the hdf5 data files for the test
image_filter->dict: optional, filter(s) to apply to the test set at image level
event_filter->dict: optional, filter(s) to apply to the test set
"""
test_step = steps.get_test_step_mt(**experiment_hparams)
"""function: mandatory, the function to compute the test step"""
dl2_path = ''
"""str: optional, path to store dl2 files"""
test_dataset_parameters = {
    # 'subarray': [1],
}
"""dict: optional, the parameters of the dataset specific to the test operation."""
test_batch_size = 10
"""int: optional, the size of the mini-batch for the test"""
test_callbacks = [
    WriteDL2Files()
]
"""dict: list of callbacks
"""
PypiClean
/Ceygen-0.3.tar.gz/Ceygen-0.3/doc/llt.rst
======================================== Cholesky Decomposition-powered Functions ======================================== This module contains algebraic functions powered by the Cholesky matrix decomposition (as provided by the <`Eigen/Cholesky`_> include). .. module:: ceygen.llt .. function:: cholesky(x[, out=None]) Compute Cholesky decomposition of matrix *x* (which must be square, Hermitian and positive-definite) so that *x* = *out* \* *out*.H (*out*.H being conjugate transpose of *out*) :param x: matrix to decompose :type x: |nonint_matrix| :param out: |out| :type out: |nonint_matrix| :raises: |valueerror| :raises: |typeerror| :rtype: |nonint_matrix| .. _`Eigen/Cholesky`: http://eigen.tuxfamily.org/dox/QuickRefPage.html#QuickRef_Headers .. include:: definitions.rst
PypiClean
/django-chuck-0.2.3.tar.gz/django-chuck/modules/feincms/project/static/scripts/libs/tiny_mce/plugins/contextmenu/editor_plugin.js
(function(){var a=tinymce.dom.Event,c=tinymce.each,b=tinymce.DOM;tinymce.create("tinymce.plugins.ContextMenu",{init:function(e){var h=this,f,d,i;h.editor=e;d=e.settings.contextmenu_never_use_native;h.onContextMenu=new tinymce.util.Dispatcher(this);f=e.onContextMenu.add(function(j,k){if((i!==0?i:k.ctrlKey)&&!d){return}a.cancel(k);if(k.target.nodeName=="IMG"){j.selection.select(k.target)}h._getMenu(j).showMenu(k.clientX||k.pageX,k.clientY||k.pageY);a.add(j.getDoc(),"click",function(l){g(j,l)});j.nodeChanged()});e.onRemove.add(function(){if(h._menu){h._menu.removeAll()}});function g(j,k){i=0;if(k&&k.button==2){i=k.ctrlKey;return}if(h._menu){h._menu.removeAll();h._menu.destroy();a.remove(j.getDoc(),"click",g);h._menu=null}}e.onMouseDown.add(g);e.onKeyDown.add(g);e.onKeyDown.add(function(j,k){if(k.shiftKey&&!k.ctrlKey&&!k.altKey&&k.keyCode===121){a.cancel(k);f(j,k)}})},getInfo:function(){return{longname:"Contextmenu",author:"Moxiecode Systems AB",authorurl:"http://tinymce.moxiecode.com",infourl:"http://wiki.moxiecode.com/index.php/TinyMCE:Plugins/contextmenu",version:tinymce.majorVersion+"."+tinymce.minorVersion}},_getMenu:function(e){var 
g=this,d=g._menu,j=e.selection,f=j.isCollapsed(),h=j.getNode()||e.getBody(),i,k;if(d){d.removeAll();d.destroy()}k=b.getPos(e.getContentAreaContainer());d=e.controlManager.createDropMenu("contextmenu",{offset_x:k.x+e.getParam("contextmenu_offset_x",0),offset_y:k.y+e.getParam("contextmenu_offset_y",0),constrain:1,keyboard_focus:true});g._menu=d;d.add({title:"advanced.cut_desc",icon:"cut",cmd:"Cut"}).setDisabled(f);d.add({title:"advanced.copy_desc",icon:"copy",cmd:"Copy"}).setDisabled(f);d.add({title:"advanced.paste_desc",icon:"paste",cmd:"Paste"});if((h.nodeName=="A"&&!e.dom.getAttrib(h,"name"))||!f){d.addSeparator();d.add({title:"advanced.link_desc",icon:"link",cmd:e.plugins.advlink?"mceAdvLink":"mceLink",ui:true});d.add({title:"advanced.unlink_desc",icon:"unlink",cmd:"UnLink"})}d.addSeparator();d.add({title:"advanced.image_desc",icon:"image",cmd:e.plugins.advimage?"mceAdvImage":"mceImage",ui:true});d.addSeparator();i=d.addMenu({title:"contextmenu.align"});i.add({title:"contextmenu.left",icon:"justifyleft",cmd:"JustifyLeft"});i.add({title:"contextmenu.center",icon:"justifycenter",cmd:"JustifyCenter"});i.add({title:"contextmenu.right",icon:"justifyright",cmd:"JustifyRight"});i.add({title:"contextmenu.full",icon:"justifyfull",cmd:"JustifyFull"});g.onContextMenu.dispatch(g,d,h,f);return d}});tinymce.PluginManager.add("contextmenu",tinymce.plugins.ContextMenu)})();
PypiClean
/Lantz-0.3.zip/Lantz-0.3/lantz/drivers/legacy/andor/neo.py
import ctypes as ct
import numpy as np

from lantz import Feat, Action, Q_
from lantz.foreign import RetStr, RetTuple

from .andor import Andor


class Neo(Andor):
    """Neo Andor CMOS Camera
    """

    def initialize(self):
        """Open the camera and apply the default acquisition configuration."""
        super().initialize()
        self.flush()
        self.fan_speed = 1
        self.width, self.height = self.sensor_size
        self.length = self.width * self.height
        self.clock_rate = 100
        self.pixel_encoding = 32
        self.imagesizebytes = self.getint("ImageSizeBytes")
        # create_string_buffer(int) allocates a zero-filled byte buffer.
        # The original passed a str (' ' * n), a TypeError on Python 3.
        self.userbuffer = ct.create_string_buffer(self.imagesizebytes)

    @Feat(None, values={32: 'Mono32', 64: 'Mono64'})
    def pixel_encoding(self, value):
        """Pixel encoding (write-only): 32 or 64 bits per pixel.
        """
        self.setenumstring("PixelEncoding", value)

    @Feat()
    def sensor_size(self):
        """Sensor (width, height) in pixels."""
        width = self.getint("SensorWidth")
        height = self.getint("SensorHeight")
        return width, height

    @Feat(None, values={100: '100 MHz', 200: '200 MHz', 280: '280 MHz'})
    def clock_rate(self, value):
        """Pixel clock rate (write-only).
        """
        self.setenumstring("PixelReadoutRate", value)

    @Feat(None)
    def fan_speed(self, value=1):
        """Fan speed (write-only).

        Renamed from the misspelled ``fan_peed``: ``initialize`` assigns
        ``self.fan_speed``, which previously bypassed this Feat entirely and
        never reached the hardware.
        """
        self.setenumerated("FanSpeed", value)

    @Feat()
    def sensor_temp(self):
        """Sensor temperature.
        """
        return self.getfloat("SensorTemperature")

    @Feat()
    def exposure_time(self):
        """Exposure time in seconds.
        """
        return self.getfloat("ExposureTime")

    @exposure_time.setter
    def exposure_time(self, exposure):
        self.setfloat("ExposureTime", exposure)

    @Feat(None)
    def roi(self, width_height_top_left):
        """Set region of interest as a (width, height, top, left) tuple.
        """
        width, height, top, left = width_height_top_left
        self.setint("AOIWidth", width)
        self.setint("AOILeft", left)
        self.setint("AOIHeight", height)
        self.setint("AOITop", top)

    @Action()
    def take_single_image(self):
        """Acquire a single frame into the preallocated user buffer.

        Renamed from ``take_image``: the original class defined ``take_image``
        twice, so this zero-argument version was silently shadowed by the
        circular-buffer version below and unreachable.
        """
        self.queuebuffer(self.userbuffer, self.imagesizebytes)
        self.command("AcquisitionStart")
        self.waitbuffer(*RetStr(1))
        self.command("AcquisitionStop")
        self.flush()
        # np.fromstring is deprecated; frombuffer gives the same values.
        image = np.frombuffer(self.userbuffer, dtype=np.uint32, count=self.length)
        image.shape = (self.height, self.width)
        return image

    @Action()
    def take_image(self, numbuff, numframes):
        """Image acquisition with a circular buffer of ``numbuff`` buffers.

        NOTE(review): ``numframes`` is accepted but unused, and the first
        ``queuebuffer`` call passes the whole buffer list (the per-buffer
        re-queue happens inside the loop) -- both kept as-is pending
        confirmation against the Andor SDK wrapper.
        """
        imagesizebytes = self.getint("ImageSizeBytes")
        userbuffer = []
        for i in range(numbuff):
            # Zero-filled buffers; the original ' ' * n str argument is a
            # TypeError on Python 3.
            userbuffer.append(ct.create_string_buffer(imagesizebytes))
        self.queuebuffer(userbuffer, imagesizebytes)
        self.command("AcquisitionStart")
        for i in range(numbuff):
            self.waitbuffer(*RetStr(1))
            self.queuebuffer(userbuffer[i], imagesizebytes)
        self.command("AcquisitionStop")
        self.flush()
        image = np.frombuffer(userbuffer[0], dtype=np.uint32, count=self.length)
        image.shape = (self.height, self.width)
        return image
PypiClean
/MegEngine-1.13.1-cp37-cp37m-macosx_10_14_x86_64.whl/megengine/functional/utils.py
from ..core._imperative_rt.core2 import apply
from ..core._imperative_rt.core2 import sync as _sync
from ..core.ops.builtin import AssertEqual
from ..tensor import Tensor
from ..utils.deprecation import deprecated_func
from .elemwise import abs, maximum, minimum
from .tensor import ones, zeros

# Only topk_accuracy is part of this module's public surface; _assert_equal
# and _simulate_error are internal helpers.
__all__ = ["topk_accuracy"]


def _assert_equal(
    expect: Tensor, actual: Tensor, *, maxerr: float = 0.0001, verbose: bool = False
):
    r"""Asserts two tensors equal and returns expected value (first input).
    It is a variant of python assert which is symbolically traceable (similar to ``numpy.testing.assert_equal``).
    If we want to verify the correctness of model, just ``assert`` its states and outputs.
    While sometimes we need to verify the correctness at different backends for *dumped* model
    (or in :class:`~jit.trace` context), and no python code could be executed in that case.
    Thus we have to use :func:`~functional.utils._assert_equal` instead.

    Args:
        expect: expected tensor value
        actual: tensor to check value
        maxerr: max allowed error; error is defined as the minimal of absolute and relative error
        verbose: whether to print maxerr to stdout during opr exec

    Examples:
        >>> x = Tensor([1, 2, 3], dtype="float32")
        >>> y = Tensor([1, 2, 3], dtype="float32")
        >>> F.utils._assert_equal(x, y, maxerr=0)
        Tensor([1. 2. 3.], device=xpux:0)
    """
    # Elementwise error = |expect - actual| / max(min(|expect|, |actual|), 1),
    # i.e. the smaller of absolute and relative error, reduced with max().
    err = (
        abs(expect - actual)
        / maximum(
            minimum(abs(expect), abs(actual)),
            Tensor(1.0, dtype="float32", device=expect.device),
        )
    ).max()
    # The AssertEqual op raises on the device side when err exceeds maxerr.
    result = apply(AssertEqual(maxerr=maxerr, verbose=verbose), expect, actual, err)[0]
    _sync()  # sync interpreter to get exception
    return result


def _simulate_error():
    # Deliberately compares all-zeros against all-ones with maxerr=0 so the
    # AssertEqual op fails -- used to exercise error propagation.
    x1 = zeros(100)
    x2 = ones(100)
    (ret,) = apply(AssertEqual(maxerr=0, verbose=False), x1, x2, x2)
    return ret


# Deprecated re-exports kept for backward compatibility with pre-1.3 imports.
topk_accuracy = deprecated_func(
    "1.3", "megengine.functional.metric", "topk_accuracy", True
)
copy = deprecated_func("1.3", "megengine.functional.tensor", "copy", True)
PypiClean
/node_managment_application-0.0.1.tar.gz/node_managment_application-0.0.1/nms_app/static/admin/js/SelectBox.js
'use strict';
{
    // Caches the options of admin multi-select widgets so filtering and
    // moving entries between the "available" and "chosen" boxes does not
    // have to re-read the DOM each time. Keyed by select element id.
    const SelectBox = {
        cache: {},
        init: function(id) {
            const box = document.getElementById(id);
            SelectBox.cache[id] = [];
            const cache = SelectBox.cache[id];
            for (const node of box.options) {
                cache.push({value: node.value, text: node.text, displayed: 1});
            }
        },
        redisplay: function(id) {
            // Repopulate HTML select box from cache
            const box = document.getElementById(id);
            const scroll_value_from_top = box.scrollTop;
            box.innerHTML = '';
            for (const node of SelectBox.cache[id]) {
                if (node.displayed) {
                    const new_option = new Option(node.text, node.value, false, false);
                    // Shows a tooltip when hovering over the option
                    new_option.title = node.text;
                    box.appendChild(new_option);
                }
            }
            box.scrollTop = scroll_value_from_top;
        },
        filter: function(id, text) {
            // Redisplay the HTML select box, displaying only the choices containing ALL
            // the words in text. (It's an AND search.)
            const tokens = text.toLowerCase().split(/\s+/);
            for (const node of SelectBox.cache[id]) {
                node.displayed = 1;
                const node_text = node.text.toLowerCase();
                for (const token of tokens) {
                    if (!node_text.includes(token)) {
                        node.displayed = 0;
                        break; // Once the first token isn't found we're done
                    }
                }
            }
            SelectBox.redisplay(id);
        },
        get_hidden_node_count(id) {
            const cache = SelectBox.cache[id] || [];
            return cache.filter(node => node.displayed === 0).length;
        },
        delete_from_cache: function(id, value) {
            let delete_index = null;
            const cache = SelectBox.cache[id];
            for (const [i, node] of cache.entries()) {
                if (node.value === value) {
                    delete_index = i;
                    break;
                }
            }
            // Only splice when the value was actually found: splice(null, 1)
            // coerces null to 0 and would wrongly delete the first entry.
            if (delete_index !== null) {
                cache.splice(delete_index, 1);
            }
        },
        add_to_cache: function(id, option) {
            SelectBox.cache[id].push({value: option.value, text: option.text, displayed: 1});
        },
        cache_contains: function(id, value) {
            // Check if an item is contained in the cache
            for (const node of SelectBox.cache[id]) {
                if (node.value === value) {
                    return true;
                }
            }
            return false;
        },
        move: function(from, to) {
            // Move the selected options from one box to the other.
            const from_box = document.getElementById(from);
            for (const option of from_box.options) {
                const option_value = option.value;
                if (option.selected && SelectBox.cache_contains(from, option_value)) {
                    SelectBox.add_to_cache(to, {value: option_value, text: option.text, displayed: 1});
                    SelectBox.delete_from_cache(from, option_value);
                }
            }
            SelectBox.redisplay(from);
            SelectBox.redisplay(to);
        },
        move_all: function(from, to) {
            // Move every option (selected or not) from one box to the other.
            const from_box = document.getElementById(from);
            for (const option of from_box.options) {
                const option_value = option.value;
                if (SelectBox.cache_contains(from, option_value)) {
                    SelectBox.add_to_cache(to, {value: option_value, text: option.text, displayed: 1});
                    SelectBox.delete_from_cache(from, option_value);
                }
            }
            SelectBox.redisplay(from);
            SelectBox.redisplay(to);
        },
        sort: function(id) {
            // Case-insensitive sort of the cached entries by display text.
            SelectBox.cache[id].sort(function(a, b) {
                a = a.text.toLowerCase();
                b = b.text.toLowerCase();
                if (a > b) {
                    return 1;
                }
                if (a < b) {
                    return -1;
                }
                return 0;
            } );
        },
        select_all: function(id) {
            const box = document.getElementById(id);
            for (const option of box.options) {
                option.selected = true;
            }
        }
    };
    window.SelectBox = SelectBox;
}
PypiClean
/Giraffe_View-0.0.9.5.tar.gz/Giraffe_View-0.0.9.5/Giraffe_View/function.py
import subprocess
import re
import os
from termcolor import colored
from subprocess import Popen, PIPE


def print_with_color(input_string):
    """Print *input_string* to stdout in green."""
    print(colored(input_string, "green"))


def error_with_color(input_string):
    """Print *input_string* to stdout in red (used for error reporting)."""
    print(colored(input_string, "red"))


def cmd_shell(cammands, string):
    """Run a command given as a single space-separated string.

    :param cammands: command line, split on single spaces (individual
        arguments must therefore not contain spaces).
    :param string: human-readable label for the command, used in the
        failure message.
    """
    process = Popen(cammands.split(' '), stdout=subprocess.DEVNULL,
                    stderr=PIPE, universal_newlines=True)
    # communicate() waits for termination, so no separate wait() is needed.
    _out, err = process.communicate()
    if process.returncode != 0:
        # The original passed the raw (stdout, stderr) tuple to colored(),
        # producing an unreadable message; report the captured stderr instead.
        error_with_color("{} FAILED (exit code {}): {}".format(
            string, process.returncode, err))


def Data_process(read, ref, threads=10):
    """Filter reads, map them to *ref* and produce a sorted, indexed BAM.

    Writes its outputs under results/observed_quality/ and requires seqkit,
    minimap2 and samtools on the PATH.

    :param read: path of the input FASTQ file.
    :param ref: path of the reference FASTA file.
    :param threads: number of threads passed to the external tools.
    :raises Exception: if any external command exits non-zero.
    """
    cmd0 = ["mkdir", "-p", "results/observed_quality"]
    # Keep reads >= 200 bp with mean quality >= Q7, drop gaps.
    cmd1 = ["seqkit", "seq", read, "-m", "200", "-Q", "7", "-g", "-j", str(threads),
            "-o", "results/observed_quality/clean.fastq"]
    # Map with the ONT preset, keep MD tags, suppress secondary alignments.
    cmd2 = ["minimap2", "-ax", "map-ont", "-o", "results/observed_quality/tmp.sam",
            "--MD", "--secondary=no", "-L", "-t", str(threads), ref,
            "results/observed_quality/clean.fastq"]
    # SAM -> BAM, dropping unmapped reads (-F4).
    cmd3 = ["samtools", "view", "-bS", "-F4", "-@", str(threads),
            "-o", "results/observed_quality/tmp.bam", "results/observed_quality/tmp.sam"]
    cmd4 = ["samtools", "sort", "-@", str(threads),
            "-o", "results/observed_quality/tmp.sort.bam", "results/observed_quality/tmp.bam"]
    cmd5 = ["samtools", "index", "-@", str(threads), "results/observed_quality/tmp.sort.bam"]
    # Remove intermediate files once the sorted BAM exists.
    cmd6 = ["rm", "-rf", "results/observed_quality/tmp.sam", "results/observed_quality/tmp.bam"]

    # Run each command in order and abort the pipeline on the first failure.
    for i, cmd in enumerate([cmd0, cmd1, cmd2, cmd3, cmd4, cmd5, cmd6]):
        try:
            subprocess.run(cmd, check=True)
        except subprocess.CalledProcessError as e:
            print("Command {} failed with error code {}".format(i + 1, e.returncode))
            print(e.output)
            raise Exception("Data processing failed")


def mkdir_d(input_name):
    """Create results/<input_name>, including parents, if missing."""
    # os.makedirs replaces the original external `mkdir -p` subprocess call
    # with the same effect, portably and without spawning a process.
    os.makedirs("results/" + str(input_name), exist_ok=True)


def count_indel_and_snv(str):
    """Count occurrences of each character in *str*.

    :returns: dict mapping character -> count.
    """
    counts = {}
    for ch in str:
        counts[ch] = counts.get(ch, 0) + 1
    return counts


def remove_I(string):
    """Strip trailing insertion marks ("I") from *string*.

    Safe on an empty or all-"I" input (the original indexed ``string[-1]``
    unconditionally and raised IndexError in those cases).
    """
    while string and string[-1] == "I":
        string = string[:-1]
    return string


def remove_clip_list(input_cigar, input_pairs, input_ID):
    """Drop soft-clipped (S) entries from aligned pairs based on the CIGAR.

    Hard clips (H) are not present in the aligned pairs, so pure hard-clip
    CIGARs return the pairs unchanged; soft clips are trimmed from the
    corresponding end(s).

    :param input_cigar: CIGAR string of the alignment.
    :param input_pairs: list of aligned pairs for the read.
    :param input_ID: read identifier, used only in the error message.
    """
    # NOTE(review): the class [S, H] also matches ',' and ' '; harmless for
    # well-formed CIGAR strings, kept as-is.
    remove_cigarstring = re.findall(r"\d+[S, H]+", input_cigar)
    # HH & 0H & H0 & 00: hard clips only (or no clips) -> nothing to trim.
    # Fixed: the original compared the whole match (e.g. "5H") to "H", so
    # single hard-clip CIGARs fell through to the soft-clip branches and
    # pairs were trimmed although hard-clipped bases are absent from them.
    if ((len(remove_cigarstring) == 2) and (remove_cigarstring[0][-1] == remove_cigarstring[1][-1] == "H")) or ((len(remove_cigarstring) == 1) and (remove_cigarstring[-1][-1] == "H")) or (len(remove_cigarstring) == 0):
        valid_pairs = input_pairs
    # SS: soft clips at both ends -> trim both sides.
    elif (len(remove_cigarstring) == 2) and (remove_cigarstring[0][-1] == remove_cigarstring[1][-1] == "S"):
        remove_start_site = int(remove_cigarstring[0][:-1])
        tmp_pairs = input_pairs[remove_start_site:]
        remove_end_site = int(remove_cigarstring[1][:-1])
        valid_pairs = tmp_pairs[:len(tmp_pairs) - remove_end_site]
    # 0S & HS: soft clip at the end only -> trim the tail.
    elif ((len(remove_cigarstring) == 1) and (input_cigar[-1] == "S")) or (len(remove_cigarstring) == 2) and (remove_cigarstring[0][-1] == "H") and ((remove_cigarstring[1][-1] == "S")):
        remove_end_site = int(remove_cigarstring[-1][:-1])
        valid_pairs = input_pairs[:len(input_pairs) - remove_end_site]
    # S0 & SH: soft clip at the start only -> trim the head.
    elif (len(remove_cigarstring) == 1) and (input_cigar[-1] != "S") or ((len(remove_cigarstring) == 2) and (remove_cigarstring[0][-1] == "S") and (remove_cigarstring[1][-1] == "H")):
        remove_start_site = int(remove_cigarstring[0][:-1])
        valid_pairs = input_pairs[remove_start_site:]
    else:
        print(str(input_ID) + ", please recheck this CIGAR and MD!")
    return valid_pairs


"""
only for base A T G C
(read_position, ref_position, "ref_base")
none          √            √                 Deletion(D)
√             none         none              Insertion(I)
√             √            N(A,T,G,C)        Match(M)
√             √            n(a,t,g,c)        Substitution(S)
"""


def get_base_alignment(input_list):
    """Classify one aligned-pair tuple as D, I, M or S.

    :param input_list: (read_position, ref_position, ref_base) tuple; the
        ref_base is upper-case for matches and lower-case for mismatches.
    :returns: "D" (deletion), "I" (insertion), "M" (match) or
        "S" (substitution).
    """
    map_list = ["A", "T", "G", "C"]
    if input_list[0] is None:
        return "D"  # no read position -> deletion
    if input_list[1] is None:
        return "I"  # no reference position -> insertion
    if input_list[2] in map_list:
        return "M"  # upper-case reference base -> match
    return "S"  # lower-case reference base -> substitution
PypiClean
/Kotti-2.0.9.tar.gz/Kotti-2.0.9/kotti/migrate.py
import os
from typing import Callable
from typing import List

import pkg_resources
from alembic.config import Config
from alembic.environment import EnvironmentContext
from alembic.script import ScriptDirectory
from alembic.util import load_python_file
from zope.sqlalchemy import mark_changed

from kotti import DBSession
from kotti import conf_defaults
from kotti import get_settings
from kotti.util import command

# Directory of the alembic scripts bundled with Kotti itself.
KOTTI_SCRIPT_DIR = pkg_resources.resource_filename("kotti", "alembic")
DEFAULT_LOCATION = "kotti:alembic"


class ScriptDirectoryWithDefaultEnvPy(ScriptDirectory):
    """ScriptDirectory that falls back to Kotti's bundled ``env.py``
    when the add-on's own script location has none."""

    @property
    def env_py_location(self) -> str:
        loc = super().env_py_location
        if not os.path.exists(loc):
            # Add-on ships no env.py: use the one bundled with Kotti.
            loc = os.path.join(KOTTI_SCRIPT_DIR, "env.py")
        return loc

    def run_env(self) -> None:
        dir_, filename = self.env_py_location.rsplit(os.path.sep, 1)
        load_python_file(dir_, filename)


class PackageEnvironment:
    """Alembic config + script directory for one ``pkg:dir`` location.

    Each package keeps its own version table named
    ``<pkg_name>_alembic_version`` so migrations of different add-ons
    do not interfere.
    """

    def __init__(self, location: str) -> None:
        self.location = location
        self.config = self._make_config(location)
        self.script_dir = self._make_script_dir(self.config)

    @property
    def pkg_name(self) -> str:
        # "kotti:alembic" -> "kotti"
        return self.location.split(":")[0]

    @property
    def version_table(self) -> str:
        return f"{self.pkg_name}_alembic_version"

    def run_env(self, fn: Callable, **kw) -> None:
        # Run the location's env.py with `fn` as the migration function.
        with EnvironmentContext(
            self.config, self.script_dir, fn=fn, version_table=self.version_table, **kw
        ):
            self.script_dir.run_env()

    @staticmethod
    def _make_config(location: str) -> Config:
        cfg = Config()
        cfg.set_main_option("script_location", location)
        cfg.set_main_option("sqlalchemy.url", get_settings()["sqlalchemy.url"])
        return cfg

    @staticmethod
    def _make_script_dir(alembic_cfg: Config) -> ScriptDirectoryWithDefaultEnvPy:
        script_dir = ScriptDirectory.from_config(alembic_cfg)
        # Swap in the env.py-fallback subclass on the live instance.
        script_dir.__class__ = ScriptDirectoryWithDefaultEnvPy  # O_o
        return script_dir


def get_locations() -> List[str]:
    # Whitespace-separated list from the 'kotti.alembic_dirs' setting.
    conf_str = get_settings()["kotti.alembic_dirs"]
    return [line.strip() for line in conf_str.split() if line.strip()]


def stamp_head(location: str = DEFAULT_LOCATION, revision: None = None) -> None:
    """Record `revision` (default: head) in the version table without
    running any migration scripts."""
    env = PackageEnvironment(location)

    def do_stamp(rev, context, revision=revision):
        if revision is None:
            revision = context.script.get_current_head()
        elif revision == "None":
            # The literal string "None" requests stamping "no revision".
            revision = None
        context.stamp(env.script_dir, revision)
        # Tell zope.sqlalchemy the session has pending changes to commit.
        mark_changed(DBSession())
        return []

    env.run_env(do_stamp)


def stamp_heads() -> None:
    # Stamp every registered location at its head.
    for location in get_locations():
        stamp_head(location)


def upgrade(location=DEFAULT_LOCATION, revision=None):
    """Upgrade one location's database schema to `revision` (default: head)."""
    # We don't want to fire any kind of events during a migration,
    # because "migrations are a low-level thing".
    from kotti import events

    events.clear()

    pkg_env = PackageEnvironment(location)

    if revision is None:
        revision = pkg_env.script_dir.get_current_head()

    print(f"Upgrading {pkg_env.location}:")

    # Note: this inner function intentionally shadows the module-level
    # `upgrade`; it is only used as alembic's migration callback.
    def upgrade(heads, context):
        # alembic supports multiple heads, we don't.
        # initial revision is () in alembic >= 0.7
        rev = heads[0] if heads else None
        if rev == revision:
            print(" - already up to date.")
            return []
        print(f" - upgrading from {rev} to {revision}...")
        return context.script._upgrade_revs(revision, rev)

    pkg_env.run_env(upgrade, starting_rev=None, destination_rev=revision)
    print()


def upgrade_all():
    # Run `upgrade` for every registered location.
    for location in get_locations():
        upgrade(location)


def list_all():
    """Print every known migration and the current revision per package."""
    pkg_envs = [PackageEnvironment(location) for location in get_locations()]
    for pkg_env in pkg_envs:
        print(f"{pkg_env.pkg_name}:")

        for script in pkg_env.script_dir.walk_revisions():
            print(
                " - {} -> {}: {}".format(
                    script.down_revision, script.revision, script.doc
                )
            )

        def current_revision(rev, context):
            rev = rev[0] if rev else None
            print(f" - current revision: {rev}")
            return []

        pkg_env.run_env(current_revision)
        print()


def kotti_migrate_command():
    # This local __doc__ is runtime data: `command` feeds it to docopt,
    # which parses the Usage/Options sections to build the CLI.
    __doc__ = """Migrate Kotti and Kotti add-ons.

    Usage:
      kotti-migrate <config_uri> list_all
      kotti-migrate <config_uri> upgrade [--scripts=<location>] [--rev=<rev>]
      kotti-migrate <config_uri> upgrade_all
      kotti-migrate <config_uri> stamp_head [--scripts=<location>] [--rev=<rev>]

    o 'list_all' prints a list of all available migrations of Kotti and
      registered add-ons.
    o 'upgrade' will run Kotti's upgrades to upgrade the database to the
      latest version.  Use '--scripts=kotti_myaddon:alembic' to run the
      upgrades of the 'kotti_myaddon' package instead.
    o 'upgrade_all' will run all upgrades of all packages registered with
      Kotti.
    o 'stamp_head' allows you to manually set the stamped version to the
      latest version inside the 'kotti_alembic_version' table, that is,
      without actually running any migrations.  You may use this command
      for a different package by using the '--scripts' option.

    Options:
      -h --help     Show this screen.
    """
    # We need to turn off populators and root_factory when we run
    # migrations, because they would access the database, which may
    # not be possible prior to the migration.
    #
    # Unfortunately, we're not able to just set the 'kotti.populators'
    # setting to an empty list.  Since add-ons might add to this list
    # again later, when we call 'bootstrap' (and thus their
    # 'includeme' function).
    save_conf_defaults = conf_defaults.copy()
    os.environ["KOTTI_DISABLE_POPULATORS"] = "1"
    conf_defaults["kotti.root_factory"] = [lambda req: None]

    def callback(arguments):
        args = ()
        args_with_location = (arguments["--scripts"] or DEFAULT_LOCATION,)
        if arguments["list_all"]:
            func = list_all
        elif arguments["upgrade"]:
            func = upgrade
            args = args_with_location + (arguments["--rev"],)
        elif arguments["upgrade_all"]:
            func = upgrade_all
        elif arguments["stamp_head"]:
            func = stamp_head
            args = args_with_location + (arguments["--rev"],)
        else:
            raise ValueError("Unknown command")
        func(*args)

    try:
        return command(callback, __doc__)
    finally:
        # Restore global state even if the command failed.
        conf_defaults.clear()
        conf_defaults.update(save_conf_defaults)
        del os.environ["KOTTI_DISABLE_POPULATORS"]
PypiClean
/AcDummyLib-0.2.0.tar.gz/AcDummyLib-0.2.0/README.md
# Assetto Corsa dummy Python library

Dummy library for Assetto Corsa native functions presented in Python. Currently, it has a single interface for the `ac` module.

The main goal of this package: provide convenient autocomplete in an IDE (tested in PyCharm).

## Installation to develop AC mod

You will need to install the package:

```shell
pip install AcDummyLib
```

Add the following in your script instead of `import ac`:

```python
if __name__ == '__main__':
    ### pip install AcDummyLib
    from AcDummyLib import ac
else:
    import ac
```

Now you can check your IDE; autocomplete shall work.

## Contribution

You are very welcome to add changes into this code. =) Please feel free to push merge/pull requests. Or, you may raise an issue to highlight found discrepancies.

## Roadmap

- Migrate function descriptions into the interface file.

## References

#### Source documents:

- https://docs.google.com/document/d/13trBp6K1TjWbToUQs_nfFsB291-zVJzRZCNaTYt4Dzc/pub
- https://assettocorsamods.net/attachments/inofficial_acpythondoc_v2-pdf.7415/

#### Initial forum threads:

- https://assettocorsamods.net/threads/doc-python-doc.59
- https://assettocorsamods.net/threads/is-there-a-way-to-load-ac-library-to-have-autocomplete-in-an-ide-e-g-pycharm.3088/
PypiClean
/Gestus-0.3.4.tar.gz/Gestus-0.3.4/gestus/migrations/0003_fill_environment_url.py
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models


class Migration(DataMigration):
    # Data migration: seed each WebsiteEnvironment's `url` field from its
    # parent Website's `url` (added by the preceding schema migration).

    def forwards(self, orm):
        # Copy the parent website URL onto every environment row.
        for environment in orm.WebsiteEnvironment.objects.all():
            environment.url = environment.website.url
            environment.save()

    def backwards(self, orm):
        "Write your backwards methods here."
        # Intentionally a no-op: the forwards copy cannot be undone safely.

    # Frozen ORM snapshot generated by South — do not edit by hand.
    models = {
        u'gestus.egg': {
            'Meta': {'object_name': 'Egg'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'package': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        u'gestus.eggversion': {
            'Meta': {'object_name': 'EggVersion'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'egg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': u"orm['gestus.Egg']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        u'gestus.website': {
            'Meta': {'object_name': 'Website'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        u'gestus.websiteenvironment': {
            'Meta': {'object_name': 'WebsiteEnvironment'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'eggs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['gestus.EggVersion']", 'symmetrical': 'False', 'blank': 'True'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'server': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'website': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'environments'", 'to': u"orm['gestus.Website']"})
        }
    }

    complete_apps = ['gestus']
    symmetrical = True
PypiClean
/Activate_App-0.0.10-py3-none-any.whl/activate/app/checklist.py
from __future__ import annotations

from typing import overload

from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt

# Short aliases for the three Qt check states used throughout the class.
Unchecked, PartiallyChecked, Checked = Qt.Unchecked, Qt.PartiallyChecked, Qt.Checked


class CheckList(QtWidgets.QListWidget):
    """A QListWidget with checkboxes on items."""

    def __init__(self, *args, **kwargs):
        # Guard flag: True while check states are updated programmatically,
        # so item_changed does not react to its own edits.
        self.do_not_recurse = False
        # True once an aggregate "All" row has been inserted at index 0.
        self.all_row = False
        super().__init__(*args, **kwargs)
        self.itemChanged.connect(self.item_changed)
        self.itemDoubleClicked.connect(self.item_double_clicked)

    @overload
    def __getitem__(self, index: int) -> QtWidgets.QListWidgetItem:
        ...

    @overload
    def __getitem__(self, index: slice) -> list[QtWidgets.QListWidgetItem]:
        ...

    def __getitem__(self, index):
        # Support both integer indexing and slicing over the rows.
        if isinstance(index, slice):
            return [self.item(i) for i in range(len(self))[index]]
        result = self.item(index)
        if result is None:
            raise IndexError(f"{self.__class__.__qualname__} index out of range")
        return result

    @property
    def row_names(self):
        # Item texts, top to bottom (includes the "All" row if present).
        return [row.text() for row in self]

    @row_names.setter
    def row_names(self, new_items):
        # Replace all rows; new rows start unchecked.
        self.clear()
        self.addItems(new_items)
        for row in self:
            row.setCheckState(Unchecked)

    @property
    def states(self):
        # Mapping of row text -> Qt check state.
        return {row.text(): row.checkState() for row in self}

    @states.setter
    def states(self, new_states):
        # Only rows named in `new_states` are touched.
        for index, item in enumerate(self.row_names):
            if item in new_states:
                self.set_check_state(index, new_states[item])

    @property
    def num_states(self):
        # Numeric view of the states: 0, 0.5 or 1 per row.
        return {
            row.text(): {Unchecked: 0, PartiallyChecked: 0.5, Checked: 1}[
                row.checkState()
            ]
            for row in self
        }

    @num_states.setter
    def num_states(self, new_states):
        for index, item in enumerate(self.row_names):
            if item in new_states:
                if new_states[item] == 0:
                    self.set_check_state(index, Unchecked)
                elif new_states[item] == 0.5:
                    self.set_check_state(index, PartiallyChecked)
                elif new_states[item] == 1:
                    self.set_check_state(index, Checked)

    def get_row(self, row):
        """Get a row from a string, index or row."""
        if isinstance(row, str):
            for real_row in self:
                if real_row.text() == row:
                    return real_row
            raise ValueError(f"{row} is not a row.")
        if isinstance(row, int):
            return self[row]
        # Already a QListWidgetItem.
        return row

    def set_check_state(self, row, state):
        self.get_row(row).setCheckState(state)

    def check_state(self, row):
        return self.get_row(row).checkState()

    @property
    def checked_rows(self):
        # Texts of fully checked rows only (partial does not count).
        return [r.text() for r in self if r.checkState() == Checked]

    def item_changed(self, item):
        # Keep the "All" row and the normal rows mutually consistent.
        if self.do_not_recurse or not self.all_row:
            return
        self.stop_updates()
        if self.is_all(item):
            # "All" toggled: propagate its state to every other row.
            for item_ in self[1:]:
                item_.setCheckState(item.checkState())
        else:
            # A normal row changed: recompute the "All" row's state.
            states = {i.checkState() for i in self[1:]}
            self.set_all_state(
                next(iter(states)) if len(states) == 1 else PartiallyChecked
            )
        self.start_updates()

    def item_double_clicked(self, item):
        # Double-clicking "All" checks everything; double-clicking a normal
        # row makes it the only checked row.
        if self.is_all(item):
            self.set_all_state(Checked)
            return
        self.stop_updates()
        if self.all_row and len(self) > 2:
            self.set_all_state(PartiallyChecked)
        for item_ in self:
            if not self.is_all(item_):
                item_.setCheckState(Checked if item_ is item else Unchecked)
        self.start_updates()

    def check_all(self):
        for row in self:
            row.setCheckState(Checked)

    def add_all_row(self):
        # Insert the aggregate row at the top.
        self.insertItem(0, "All")
        self.all_row = True

    def is_all(self, item):
        """Check if a row is the 'All' row."""
        return self.all_row and self.row(item) == 0

    def set_all_state(self, state):
        if self.all_row:
            self.set_check_state(0, state)

    def stop_updates(self):
        # Suppress both our recursion guard and Qt signal delivery.
        self.do_not_recurse = True
        self.blockSignals(True)

    def start_updates(self):
        self.do_not_recurse = False
        self.blockSignals(False)
PypiClean
/DBQuery-0.4.1.tar.gz/DBQuery-0.4.1/src/dbquery/postgres.py
from psycopg2 import OperationalError as PGOperationalError
from psycopg2 import connect

from .db import DB
from .query import SelectOne


class _NextVal(SelectOne):
    # Prepared "SELECT nextval('<sequence>')" query for one sequence name.
    def __init__(self, db, sequence):
        super(_NextVal, self).__init__(
            db, 'SELECT nextval(\'{}\')'.format(sequence), None)


class PostgresDB(DB):
    """ PostgreSQL DB class using a single psycopg2 connection.

    Use either a 'dsn' connection string or keyword parameter to define the
    connection (from the psycopg2 documentation):
        database – the database name (only as keyword argument)
        user – user name used to authenticate
        password – password used to authenticate
        host – database host address (defaults to UNIX socket if not
            provided)
        port – connection port number (defaults to 5432 if not provided)
    """

    # Exposed so callers can catch connection errors without importing
    # psycopg2 themselves.
    OperationalError = PGOperationalError

    def __init__(self, dsn=None, retry=0, **kwds):
        super(PostgresDB, self).__init__(retry=retry)
        self._kwds = kwds or {}
        if dsn:
            self._kwds["dsn"] = dsn
        # Lazily opened by _connect (via the DB.connected decorator).
        self._connection = None

    def _connect(self):
        # Open the single shared connection in autocommit mode.
        if self._connection is not None:
            raise RuntimeError("Connection still exists.")
        self._connection = connect(**self._kwds)
        self._connection.set_session(autocommit=True)

    def close(self):
        # Close and forget the connection; errors during close are ignored
        # on purpose (best effort cleanup).
        if self._connection is not None:
            try:
                self._connection.close()
            except Exception:
                pass  # ignore
            self._connection = None

    @DB.connected
    def execute(self, sql, params, return_function=None):
        # Run `sql`; when given, `return_function(cursor)` extracts the
        # result before the cursor is closed.
        with self._connection.cursor() as cursor:
            cursor.execute(sql, params)
            if return_function:
                return return_function(cursor)

    @DB.connected
    def nonclosing_execute(self, sql, params, return_function=None):
        # Like execute, but the caller owns (and must close) the cursor.
        # NOTE(review): `return_function` is accepted but never used here —
        # presumably kept for signature parity with execute; confirm.
        cursor = self._connection.cursor()
        cursor.execute(sql, params)
        return cursor

    @DB.connected
    def show(self, sql, params):
        # Return the query string as it would be sent to the server,
        # with parameters bound (useful for debugging/logging).
        with self._connection.cursor() as cursor:
            return cursor.mogrify(sql, params).decode(
                self._connection.encoding)

    def NextVal(self, sequence):
        # Factory for a reusable nextval() query on `sequence`.
        return _NextVal(self, sequence)

    @DB.connected
    def _begin(self):
        # Leaving autocommit starts an implicit transaction.
        self._connection.autocommit = False

    def _commit(self):
        if self._connection is None:
            raise RuntimeError("Connection lost, can not commit!")
        self._connection.commit()
        self._connection.autocommit = True

    def _rollback(self):
        if self._connection is None:
            raise RuntimeError("Connection lost, can not roll back!")
        self._connection.rollback()
        self._connection.autocommit = True
PypiClean
/MyAutoman-1.7.3.tar.gz/MyAutoman-1.7.3/Automan/plugins/odps.py
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
# from pyod.models.abod import ABOD  # probability based
# from pyod.models. knn import KNN  # neighbour based
# from pyod.models.mcd import MCD  # linear-model based
# from pyod.models.ocsvm import OCSVM
import pandas as pd
from Automan.plugins.DDViz import out_null,auto_bin,manual_bin,out_iv,plt_multi_mosaic,plt_mosaic
from tqdm import tqdm
import joblib
import datetime
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from multiprocessing.pool import ThreadPool


class ODPS():
    """Outlier-Detection-based feature synthesis.

    Repeatedly samples an outlier-detection algorithm and a small subset of
    input features (both weighted by the KS performance of earlier rounds),
    fits the detector, and uses its outlier score as a new derived feature.

    NOTE(review): `algo_dict` and `fill_dict` use mutable default arguments;
    safe only as long as callers never mutate them.
    """

    def __init__(self, col_x, data_parts='data_parts', data_parts_use=None,
                 algo_dict = {},
                 num_of_round =50 , num_of_compile=5, play_report=True,
                 verbose=True, ks_limit=0.02, is_filter=True ,
                 fill_dict={-99:-1}, scale_type=1, n_jobs = 2,
                 contamination=.2):
        '''
        Parameters
        ----------
        col_x: list
            Feature name list.
        data_parts: str
            Column that partitions the data set.
        data_parts_use: list or None
            Partitions used for training, e.g. [1, 2]; None means the
            whole data set.
        algo_dict : dict
            Mapping of algorithm name -> detector class : default {}
        num_of_round : int
            Number of generation rounds : default 50
        num_of_compile : int
            Number of features combined per derived variable : default 5
        play_report : bool
            Whether to build the IV/KS report : default True
        verbose : bool
            Print progress : default True
        ks_limit : float
            Minimum KS for a derived variable to be kept : default 0.02
        is_filter : bool
            Whether to filter derived variables by KS : default True
        fill_dict : dict
            Value-replacement mapping applied after fillna : default {-99: -1}
        scale_type : int
            Scaling method: 1 StandardScaler, 2 MinMaxScaler, 0 none : default 1
        n_jobs : int
            Thread count : default 2
        contamination : float
            Expected outlier ratio passed to each detector : default 0.2
        '''
        self.col_x = col_x
        self.data_parts = data_parts
        self.data_parts_use = data_parts_use
        self.dict_map=dict()
        self.algo_dict = algo_dict
        self.var_list = col_x
        self.num_of_round = num_of_round
        self.num_of_compile = num_of_compile
        self.play_report = play_report
        self.verbose = verbose
        self.ks_limit = ks_limit
        self.is_filter = is_filter
        self.fill_dict = fill_dict
        self.scale_type = scale_type
        self.n_jobs = n_jobs
        self.contamination = contamination

    def _preprocess(self, data, training=True):
        '''Fill missing values and scale the features.

        Args:
            data : pandas.DataFrame : data set to transform
            training : bool : fit the scaler when True, otherwise only apply it
        Return:
            data : pandas.DataFrame
        '''
        mem_cols = data.columns
        data = data.fillna(-99)
        data = data.replace(self.fill_dict)
        # fit the scaler (training phase only)
        if training:
            if self.scale_type == 1:
                self.st = StandardScaler()
            elif self.scale_type ==2:
                self.st = MinMaxScaler()
            if self.scale_type !=1 and self.scale_type!=2 and self.scale_type!=0:
                raise ValueError('scale_type must be 1--StandardScaler, 2--MinMaxScaler')
            if self.scale_type==1 or self.scale_type==2 :
                self.st.fit_transform(data)
        # apply the scaler
        # NOTE(review): with scale_type == 0 `self.st` is never fitted, so
        # this line would fail — presumably _preprocess is only reached with
        # scale_type 1 or 2; confirm against fit()/transform().
        data = self.st.transform(data)
        data = pd.DataFrame(data)
        data.columns = list(mem_cols)
        return data

    def fit(self,data,y=None):
        '''Fit the feature generator.

        Parameters
        ----------
        data: pd.DataFrame
            Must contain every column in col_x (and data_parts when used).
        y: pd.Series
            Binary target; required.

        Returns
        -------
        self
        '''
        if y is None:
            raise TypeError('missing argument: ''y''')
        if self.data_parts_use is None and self.data_parts is not None:
            self.data_parts_use = list(data.data_parts.unique())
        # Split into train (partitions in data_parts_use) and validation
        # (everything else); without partitions train == validation.
        if self.data_parts_use:
            self.train_X = data.loc[data[self.data_parts].isin(self.data_parts_use),:].reset_index(drop=True).copy(deep=True)
            self.val_X = data.loc[~data[self.data_parts].isin(self.data_parts_use),:].reset_index(drop=True).copy(deep=True)
            self.train_Y = pd.DataFrame(y).loc[data[self.data_parts].isin(self.data_parts_use),:].reset_index(drop=True).copy(deep=True).iloc[:,0]
            self.val_Y = pd.DataFrame(y).loc[~data[self.data_parts].isin(self.data_parts_use),:].reset_index(drop=True).copy(deep=True).iloc[:,0]
        else:
            self.train_X = data.copy(deep=True)
            self.val_X = data.copy(deep=True)
            self.train_Y = y.copy()
            self.val_Y = y.copy()
        if len(self.val_X) == 0:
            self.val_X = data.copy(deep=True)
            self.val_Y = y.copy(deep=True)
        #self.train_Y = self.train_X.pop(y)
        #self.val_Y = self.val_X.pop(y)
        self.choose_var_dict = {}
        self.final_var_dict = {}
        self.algo_names = list(self.algo_dict.keys())
        # derived-feature frame for the training set
        self.output_train_set = self.train_X[self.var_list].copy()
        # derived-feature frame for the validation set
        self.output_val_set = self.val_X[self.var_list].copy()
        # var -> id mapping
        self.var2id = {var : idx for idx, var in enumerate(self.var_list)}
        # id -> var mapping
        self.id2var = {idx : var for idx, var in enumerate(self.var_list)}
        # names of generated variables
        self.choose_var_list = []
        # final variable names
        self.final_var_list = []
        self.algo_lists = []
        self.outdict = {}
        # intermediate feature-importance store
        self.imp_var_dict = {}
        # intermediate algorithm-importance store
        self.imp_algo_dict = {}
        self.ks_iv_outs = None
        # scaler instance
        self.st = None
        # fill missing values and scale the data sets
        if self.scale_type in [0, 1, 2]:
            self.train_X_normed = self._preprocess(self.train_X[self.var_list], True)
            self.val_X_normed = self._preprocess(self.val_X[self.var_list], False)
        else:
            self.train_X_normed = self.train_X
            self.val_X_normed = self.val_X
        # derived-feature frames keeping the original columns
        self.output_train_set = self.train_X.copy()
        self.output_val_set = self.val_X.copy()

        # One generation round; executed by the thread pool below.
        # NOTE(review): rounds mutate shared dicts/lists without a lock —
        # relies on CPython's GIL for safety; confirm if ported.
        def _fun_var_choice(i):
            ##for i in tqdm(range(self.num_of_round)):
            # compute feature importance weights
            var_imp_dict_rnd = self._cal_feature_importance()
            # compute algorithm importance weights
            algo_imp_dict_rnd = self._cal_algo_importance()
            # normalise algorithm importance to a probability vector
            p_algo = [algo_imp_dict_rnd[t] for t in self.algo_names]
            p_algo = p_algo / np.sum(p_algo)
            # normalise feature importance to a probability vector
            p_var = [var_imp_dict_rnd[t] for t in self.var_list]
            p_var = p_var / np.sum(p_var)
            # draw an algorithm according to its importance
            t_algo = np.random.choice(self.algo_names, 1, p=p_algo,replace=False)[0]
            t_clf = self.algo_dict[t_algo](contamination=self.contamination)
            # draw the feature subset according to feature importance
            t_vars = np.random.choice(self.var_list, self.num_of_compile, p=p_var,replace=False)
            # sort by model input order
            t_ids = sorted([self.var2id[i] for i in t_vars])
            # joined ids form the derived variable name
            t_var_name = 'odps_' + t_algo +''.join([str(i) + '_' for i in t_ids])[:-1]
            if self.verbose:
                print(t_var_name)
            # skip duplicated variables
            if t_var_name not in self.choose_var_list:
                self.choose_var_dict[t_var_name] = []
                self.choose_var_list.append(t_var_name)
                # train the detector
                t_clf.fit(self.train_X_normed[t_vars])
                self.algo_lists.append(t_clf)
                # choose_var_dict entry: [fitted detector, feature subset]
                self.choose_var_dict[t_var_name].append(t_clf)
                self.choose_var_dict[t_var_name].append(t_vars)
                # Train
                y_pred = t_clf.predict_proba(self.train_X_normed[t_vars])[:,0]
                orig = self.output_train_set.columns
                self.output_train_set[t_var_name] = y_pred
                # temporary frame fed to the IV/KS calculator
                tmp_df = self.output_train_set.copy()
                tmp_df = pd.concat([tmp_df, self.train_Y], axis=1)
                tmp_df.columns = list(orig) + [t_var_name] + ['fpd4']
                tmp_df['dt'] = 1
                # Validation
                y_pred = t_clf.predict_proba(self.val_X_normed[t_vars])[:,0]
                self.output_val_set[t_var_name] = y_pred
                # KS of this round's derived variable
                tmp_ks = self._cal_ks(tmp_df, t_var_name)
                # update the feature-importance store for each used feature
                for t_var in t_vars:
                    if t_var not in self.imp_var_dict:
                        self.imp_var_dict[t_var] = []
                    self.imp_var_dict[t_var].append(tmp_ks)
                # update the algorithm-importance store
                if t_algo not in self.imp_algo_dict:
                    self.imp_algo_dict[t_algo] = []
                self.imp_algo_dict[t_algo].append(tmp_ks)

        with ThreadPool(processes=self.n_jobs) as pool:
            pool.map(_fun_var_choice, range(self.num_of_round))
        if self.verbose:
            print('总共有%d个衍生变量'%len(self.algo_lists))
        # build the report
        if self.play_report is True:
            self._play_report()
        # drop low-KS variables
        if self.is_filter:
            self._filter_var()
        else:
            self.final_var_dict = self.choose_var_dict
            self.final_var_list = self.choose_var_list
        # unique list of raw input features used by the kept variables
        ids = []
        for k, v in self.final_var_dict.items():
            ids.extend(v[1])
        ids = list(set(ids))
        self.final_var_list_inputs = ids
        self.col_new = list(data.columns) + list(self.final_var_dict.keys())
        # release large intermediate attributes
        del self.train_X
        del self.val_X
        del self.train_Y
        del self.val_Y
        del self.train_X_normed
        del self.val_X_normed
        del self.output_train_set
        del self.output_val_set

    def _cal_algo_importance(self):
        '''Return the current algorithm-importance weights.

        Returns
        -------
        algo_imp_dict_rnd : dict
            Algorithm name -> weight (0.05 until first measurement).
        '''
        algo_imp_dict_rnd = {}
        for algo_name in self.algo_names:
            if algo_name not in self.imp_algo_dict:
                algo_imp_dict_rnd[algo_name] = 0.05
            elif self.imp_algo_dict[algo_name] == []:
                algo_imp_dict_rnd[algo_name] = 0.05
            else:
                algo_imp_dict_rnd[algo_name] = np.mean(self.imp_algo_dict[algo_name])
        return algo_imp_dict_rnd

    def _cal_feature_importance(self):
        '''Return the current feature-importance weights.

        Returns
        -------
        var_imp_dict_rnd : dict
            Feature name -> weight (0.05 until first measurement).
        '''
        var_imp_dict_rnd = {}
        for var_name in self.var_list:
            if var_name not in self.imp_var_dict:
                var_imp_dict_rnd[var_name] = 0.05
            elif self.imp_var_dict[var_name] == []:
                var_imp_dict_rnd[var_name] = 0.05
            else:
                var_imp_dict_rnd[var_name] = np.mean(self.imp_var_dict[var_name])
        return var_imp_dict_rnd

    def _cal_ks(self, df, var_name):
        '''Score one derived variable.

        Parameters
        ----------
        df : pandas.DataFrame
            Data set containing `var_name` and the 'fpd4' target.
        var_name : str

        Returns
        -------
        float
            NOTE(review): despite the name this reads out_iv's 'df_iv'
            output, which looks like an IV value rather than KS — confirm
            against DDViz.out_iv.
        '''
        df["dt_cut"] = 'all'
        iv_ks = out_iv(df, [var_name], y='fpd4', dt='dt',dt_cut = 'dt_cut',isformat = False)
        print ("iv_ks = ", iv_ks['df_iv'])
        del df["dt_cut"]
        # return iv_ks['df_iv'][var_name]
        return iv_ks['df_iv'].loc[var_name]

    def _play_report(self):
        '''Build and cache the IV/KS report for all derived variables.

        Returns
        -------
        dict
            Keys: train_iv, train_ks, val_iv, val_ks.
        '''
        outs = {}
        # training set
        df_train = pd.concat([self.output_train_set[self.choose_var_list], self.train_Y], axis=1)
        df_train.columns = self.choose_var_list + ['fpd4']
        df_train['dt']= '1'
        train_iv_ks = out_iv(df_train, self.choose_var_list, y='fpd4', dt='dt',isformat=False,dt_cut = 'dt')
        outs['train_iv'] = train_iv_ks['df_iv'].loc[self.choose_var_list]
        outs['train_ks'] = train_iv_ks['df_ks'].loc[self.choose_var_list]
        # validation set
        df_val = pd.concat([self.output_val_set[self.choose_var_list], self.val_Y], axis=1)
        df_val.columns = self.choose_var_list + ['fpd4']
        df_val['dt']= '1'
        val_iv_ks = out_iv(df_val, self.choose_var_list, y='fpd4', dt='dt',isformat=False,dt_cut = 'dt')
        outs['val_iv'] = val_iv_ks['df_iv'].loc[self.choose_var_list]
        outs['val_ks'] = val_iv_ks['df_ks'].loc[self.choose_var_list]
        self.ks_iv_outs = outs
        return outs

    def _filter_var(self):
        '''Keep only derived variables whose training KS >= ks_limit.'''
        if self.ks_iv_outs is None:
            self._play_report()
        outs = self.ks_iv_outs
        # print ("!!!outs['train_ks']=!!!",outs['train_ks'])
        out_train_ks = outs['train_ks'].to_dict()[1]
        # print ("out_train_ks",out_train_ks)
        for var_name, ks in out_train_ks.items():
            if out_train_ks[var_name] >= self.ks_limit:
                self.final_var_dict[var_name] = self.choose_var_dict[var_name]
                self.final_var_list.append(var_name)

    def transform(self,data,y=None):
        '''Append the derived features to `data`.

        Parameters
        ----------
        data: pd.DataFrame
            Must contain every column in col_x.

        Returns
        -------
        pd.DataFrame
            Copy of `data` with one extra column per kept variable.
        '''
        test_X = data[self.var_list]
        # apply the fitted scaler
        if self.scale_type==1 or self.scale_type==2 :
            test_X = self._preprocess(test_X, False)
        output_df = data.copy()
        for var_name in self.final_var_dict:
            t_dict = self.final_var_dict[var_name]
            # fitted detector
            t_algo = t_dict[0]
            # feature subset of this derived variable
            t_vars = t_dict[1]
            # outlier score becomes the derived feature
            output_df[var_name] = t_algo.predict_proba(test_X[t_vars])[:,0]
        return output_df
PypiClean
/NehorayRapid1-0.0.1-py3-none-any.whl/mmedit/models/components/discriminators/multi_layer_disc.py
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import load_checkpoint

from mmedit.models.common import LinearModule
from mmedit.models.registry import COMPONENTS
from mmedit.utils import get_root_logger


@COMPONENTS.register_module()
class MultiLayerDiscriminator(nn.Module):
    """Multilayer Discriminator.

    This is a commonly used structure with stacked multiple convolution
    layers.

    Args:
        in_channels (int): Input channel of the first input convolution.
        max_channels (int): The maximum channel number in this structure.
        num_convs (int): Number of stacked intermediate convs (including
            input conv but excluding output conv). Default to 5.
        fc_in_channels (int | None): Input dimension of the fully connected
            layer. If `fc_in_channels` is None, the fully connected layer
            will be removed.
        fc_out_channels (int): Output dimension of the fully connected
            layer.
        kernel_size (int): Kernel size of the conv modules. Default to 5.
        conv_cfg (dict): Config dict to build conv layer.
        norm_cfg (dict): Config dict to build norm layer.
        act_cfg (dict): Config dict for activation layer, "relu" by default.
        out_act_cfg (dict): Config dict for output activation, "relu" by
            default.
        with_input_norm (bool): Whether add normalization after the input
            conv. Default to True.
        with_out_convs (bool): Whether add output convs to the
            discriminator. The output convs contain two convs. The first
            out conv has the same setting as the intermediate convs but a
            stride of 1 instead of 2. The second out conv is a conv similar
            to the first out conv but reduces the number of channels to 1
            and has no activation layer. Default to False.
        with_spectral_norm (bool): Whether use spectral norm after the conv
            layers. Default to False.
        kwargs (keyword arguments): Forwarded to every ConvModule.
    """

    def __init__(self,
                 in_channels,
                 max_channels,
                 num_convs=5,
                 fc_in_channels=None,
                 fc_out_channels=1024,
                 kernel_size=5,
                 conv_cfg=None,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 out_act_cfg=dict(type='ReLU'),
                 with_input_norm=True,
                 with_out_convs=False,
                 with_spectral_norm=False,
                 **kwargs):
        super().__init__()
        if fc_in_channels is not None:
            assert fc_in_channels > 0

        self.max_channels = max_channels
        self.with_fc = fc_in_channels is not None
        self.num_convs = num_convs
        self.with_out_act = out_act_cfg is not None
        self.with_out_convs = with_out_convs

        cur_channels = in_channels
        for i in range(num_convs):
            # Channel count doubles per layer, capped at max_channels.
            out_ch = min(64 * 2**i, max_channels)
            norm_cfg_ = norm_cfg
            act_cfg_ = act_cfg
            if i == 0 and not with_input_norm:
                # No normalization right after the input conv.
                norm_cfg_ = None
            elif (i == num_convs - 1 and not self.with_fc
                  and not self.with_out_convs):
                # Last conv acts as the output layer: drop the norm and use
                # the output activation.
                norm_cfg_ = None
                act_cfg_ = out_act_cfg
            self.add_module(
                f'conv{i + 1}',
                ConvModule(
                    cur_channels,
                    out_ch,
                    kernel_size=kernel_size,
                    stride=2,
                    padding=kernel_size // 2,
                    norm_cfg=norm_cfg_,
                    act_cfg=act_cfg_,
                    with_spectral_norm=with_spectral_norm,
                    **kwargs))
            cur_channels = out_ch

        if self.with_out_convs:
            # Two extra convs with stride 1; the second reduces to a single
            # channel with no activation.
            cur_channels = min(64 * 2**(num_convs - 1), max_channels)
            out_ch = min(64 * 2**num_convs, max_channels)
            self.add_module(
                f'conv{num_convs + 1}',
                ConvModule(
                    cur_channels,
                    out_ch,
                    kernel_size,
                    stride=1,
                    padding=kernel_size // 2,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg,
                    with_spectral_norm=with_spectral_norm,
                    **kwargs))
            self.add_module(
                f'conv{num_convs + 2}',
                ConvModule(
                    out_ch,
                    1,
                    kernel_size,
                    stride=1,
                    padding=kernel_size // 2,
                    act_cfg=None,
                    with_spectral_norm=with_spectral_norm,
                    **kwargs))

        if self.with_fc:
            self.fc = LinearModule(
                fc_in_channels,
                fc_out_channels,
                bias=True,
                act_cfg=out_act_cfg,
                with_spectral_norm=with_spectral_norm)

    def forward(self, x):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w') or
                (n, c).
        """
        input_size = x.size()
        # out_convs has two additional ConvModules
        num_convs = self.num_convs + 2 * self.with_out_convs
        for i in range(num_convs):
            x = getattr(self, f'conv{i + 1}')(x)

        if self.with_fc:
            # Flatten before the fully connected head.
            x = x.view(input_size[0], -1)
            x = self.fc(x)

        return x

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If
                given None, pretrained weights will not be loaded.
                Defaults to None.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                # Here, we only initialize the module with fc layer since the
                # conv and norm layers has been initialized in `ConvModule`.
                if isinstance(m, nn.Linear):
                    nn.init.normal_(m.weight.data, 0.0, 0.02)
                    nn.init.constant_(m.bias.data, 0.0)
        else:
            raise TypeError('pretrained must be a str or None')
PypiClean
/DACBench-0.2.0.tar.gz/DACBench-0.2.0/dacbench/instance_sets/cma/Sample CMA Instances.ipynb
```
import numpy as np


def save_cma_instances(filename, n_instances=100, dim=10,
                       fcn_ids=[12, 11, 2, 23, 15, 8, 17, 20, 1, 16],
                       fcn_names=["BentCigar", "Discus", "Ellipsoid", "Katsuura", "Rastrigin",
                                  "Rosenbrock", "Schaffers", "Schwefel", "Sphere", "Weierstrass"]):
    """Sample random CMA-ES instances and append them as CSV rows to `filename`.

    Each instance gets a random initial location (dim-dimensional standard
    normal) and a random initial step size in [0, 1); the benchmark
    functions are cycled through round-robin.
    """
    init_locs = list(np.random.randn(n_instances, dim))
    init_sigmas = list(np.random.rand(n_instances))
    with open(filename, 'a') as f:
        # Build the header from `dim` instead of hard-coding ten columns.
        id_string = ("ID,dim,fcn_name,fcn_index,init_sigma,"
                     + ",".join(f"init_loc{j}" for j in range(dim)) + "\n")
        f.write(id_string)
        n_fcns = len(fcn_names)
        for i in range(n_instances):
            # Write the actual `dim` (was a hard-coded 10) and cycle through
            # all available functions (was a hard-coded modulo 10).
            inst_string = f"{i},{dim},{fcn_names[i % n_fcns]},{fcn_ids[i % n_fcns]},{init_sigmas[i]}"
            for j in range(dim):
                inst_string += f",{init_locs[i][j]}"
            inst_string += "\n"
            f.write(inst_string)


save_cma_instances("new_cma_set.csv")
```
PypiClean
/Diofant-0.14.0a2.tar.gz/Diofant-0.14.0a2/docs/release/notes-0.7.2.rst
=========== SymPy 0.7.2 =========== 16 Oct 2012 Major Changes ============= * Python 3 support - SymPy now supports Python 3. The officially supported versions are 3.2 and 3.3, but 3.1 should also work in a pinch. The Python 3-compatible tarballs will be provided separately, but it is also possible to download Python 2 code and convert it manually, via the bin/use2to3 utility. See the README for more. * PyPy support - All SymPy tests pass in recent nightlies of PyPy, and so it should have full support as of the next version after 1.9. * Combinatorics - A new module called Combinatorics was added which is the result of a successful GSoC project. It attempts to replicate the functionality of Combinatorica and currently has full featured support for Permutations, Subsets, Gray codes and Prufer codes. - In another GSoC project, facilities from computational group theory were added to the combinatorics module, mainly following the book "Handbook of computational group theory". Currently only permutation groups are supported. The main functionalities are: basic properties (orbits, stabilizers, random elements...), the Schreier-Sims algorithm (three implementations, in increasing speed: with Jerrum's filter, incremental, and randomized (Monte Carlo)), backtrack searching for subgroups with certain properties. * Definite Integration - A new module called meijerint was added, which is also the result of a successful GSoC project. It implements a heuristic algorithm for (mainly) definite integration, similar to the one used in Mathematica. The code is automatically called by the standard integrate() function. This new algorithm allows computation of important integral transforms in many interesting cases, so helper functions for Laplace, Fourier and Mellin transforms were added as well. * Random Variables - A new module called stats was added. This introduces a RandomSymbol type which can be used to model uncertainty in expressions. 
* Matrix Expressions - A new matrix submodule named expressions was added. This introduces a MatrixSymbol type which can be used to describe a matrix without explicitly stating its entries. A new family of expression types were also added: Transpose, Inverse, Trace, and BlockMatrix. ImmutableMatrix was added so that explicitly defined matrices could interact with other SymPy expressions. * Sets - A number of new sets were added including atomic sets like FiniteSet, Reals, Naturals, Integers, UniversalSet as well as compound sets like ProductSet and TransformationSet. Using these building blocks it is possible to build up a great variety of interesting sets. * Classical Mechanics - A physics submodule named mechanics was added which assists in formation of equations of motion for constrained multi-body systems. It is the result of 3 GSoC projects. Some nontrivial systems can be solved, and examples are provided. * Quantum Mechanics - Density operator module has been added. The operator can be initialized with generic Kets or Qubits. The Density operator can also work with TensorProducts as arguments. Global methods are also added that compute entropy and fidelity of states. Trace and partial-trace operations can also be performed on these density operators. - To enable partial trace operations a Tr module has been added to the core library. While the functionality should remain the same, this module is likely to be relocated to an alternate folder in the future. One can currently also use sympy.core.Tr to work on general trace operations, but this module is what is needed to work on trace and partial-trace operations on any sympy.physics.quantum objects. - The Density operators, Tr and Partial trace functionality was implemented as part of student participation in GSoC 2012. - Expanded angular momentum to include coupled-basis states and product-basis states. 
Operators can also be treated as acting on the coupled basis (default behavior) or on one component of the tensor product states. The methods for coupling and uncoupling these states can work on an arbitrary number of states. Representing, rewriting and applying states and operators between bases has been improved. * Commutative Algebra - A new module ``agca`` was started which seeks to support computations in commutative algebra (and eventually algebraic geometry) in the style of Macaulay2 and Singular. Currently there is support for computing Gröbner bases of modules over a (generalized) polynomial ring over a field. Based on this, there are algorithms for various standard problems in commutative algebra, e.g., computing intersections of submodules, equality tests in quotient rings, etc... * Plotting Module - A new plotting module has been added which uses Matplotlib as its back-end. The plotting module has functions to plot the following: * 2D line plots * 2D parametric plots. * 2D implicit and region plots. * 3D surface plots. * 3D parametric surface plots. * 3D parametric line plots. * Differential Geometry - Thanks to a GSoC project the beginning of a new module covering the theory of differential geometry was started. It can be imported with ``sympy.diffgeom``. It is based on "Functional Differential Geometry" by Sussman and Wisdom. Currently implemented are scalar, vector and form fields over manifolds as well as covariant and other derivatives. Compatibility breaks ==================== - The KroneckerDelta class was moved from ``sympy/physics/quantum/kronecker.py`` to ``sympy/functions/special/tensor_functions.py``. - Merged the KroneckerDelta class in ``sympy/physics/secondquant.py`` with the class above. - The Dij class in ``sympy/functions/special/tensor_functions.py`` was replaced with KroneckerDelta. - The errors raised for invalid ``float`` calls on SymPy objects were changed in order to emulate more closely the errors raised by the standard library. 
The ``__float__`` and ``__complex__`` methods of ``Expr`` are concerned with that change. - The ``solve()`` function returns empty lists instead of ``None`` objects if no solutions were found. Idiomatic code of the form ``sol = solve(...); if sol:...`` will not be affected by this change. - Piecewise no longer accepts a Set or Interval as a condition. One should explicitly specify a variable using ``Set().contains(x)`` to obtain a valid conditional. - The statistics module has been deprecated in favor of the new stats module. - ``sympy/galgebra/GA.py``: * ``set_main()`` is no longer needed * ``make_symbols()`` is deprecated (use ``sympy.symbols()`` instead) * the symbols used in this package are no longer broadcast to the main program - The classes for Infinity, NegativeInfinity, and NaN no longer subclass from Rational. Creating a Rational with 0 in the denominator will still return one of these classes, however. Minor changes ============= - A new module ``gaussopt`` was added supporting the most basic constructions from Gaussian optics (ray tracing matrices, geometric rays and Gaussian beams). - New classes were added to represent the following special functions: classical and generalized exponential integrals (Ei, expint), trigonometric (Si, Ci) and hyperbolic integrals (Shi, Chi), the polylogarithm (polylog) and the Lerch transcendent (lerchphi). In addition to providing all the standard sympy functionality (differentiation, numerical evaluation, rewriting ...), they are supported by both the new meijerint module and the existing hypergeometric function simplification module. - An ImmutableMatrix class was created. It has the same interface and functionality of the old Matrix but is immutable and inherits from Basic. - A new function in ``geometry.util`` named ``centroid`` was added which will calculate the centroid of a collection of geometric entities. 
And the polygon module now allows triangles to be instantiated from combinations of side lengths and angles (using keywords sss, asa, sas) and defines utility functions to convert between degrees and radians. - In ``ntheory.modular`` there is a function (``solve_congruence``) to solve congruences such as "What number is 2 mod 3, 3 mod 5 and 2 mod 7?" - A utility function named ``find_unit`` has been added to physics.units that allows one to find units that match a given pattern or contain a given unit. - There have been some additions and modifications to Expr's methods: - Although the problem of proving that two expressions are equal is in general a difficult one (since whatever algorithm is used, there will always be an expression that will slip through the algorithm) the new method of Expr named ``equals`` will do its best to answer whether A equals B: A.equals(B) might give True, False or None. - coeff now supports a third argument ``n`` (which comes 2nd now, instead of ``right``). This ``n`` is used to indicate the exponent on x which one seeks: ``(x**2 + 3*x + 4).coeff(x, 1)`` -> ``3``. This makes it possible to extract the constant term from a polynomial: ``(x**2 + 3*x + 4).coeff(x, 0)`` -> ``4``. - The method ``round`` has been added to round a SymPy expression to a given number of decimal places (to the left or right of the decimal point). - divmod is now supported for all SymPy numbers. - In the simplify module, the algorithms for denesting of radicals (sqrtdenest) and simplifying gamma functions (in combsimp) have been significantly improved. - The Mathematica-similar ``TableForm`` function has been added to the printing.tableform module so one can easily generate tables with headings. - The expand API has been updated. ``expand()`` now officially supports arbitrary ``_eval_expand_hint()`` methods on custom objects. ``_eval_expand_hint()`` methods are now only responsible for expanding the top-level expression. 
All ``deep=True`` related logic happens in ``expand()`` itself. See the docstring of ``expand()`` for more information and an example. - Two options were added to ``isympy`` to aid in interactive usage. ``isympy -a`` automatically creates symbols, so that typing something like ``a`` will give ``Symbol('a')``, even if you never typed ``a = Symbol('a')`` or ``var('a')``. ``isympy -i`` automatically wraps integer literals with Integer, so that ``1/2`` will give ``Rational(1, 2)`` instead of ``0.5``. ``isympy -I`` is the same as ``isympy -a -i``. ``isympy -I`` makes isympy act much more like a traditional interactive computer algebra system. These both require IPython. - The official documentation at https://docs.sympy.org/ now includes an extension that automatically hooks the documentation examples in to `SymPy Live <https://live.sympy.org>`_. In addition to the more noticeable changes listed above, there have been numerous smaller additions, improvements and bug fixes in the commits in this release. See the git log for a full list of all changes. The command ``git log sympy-0.7.1..sympy-0.7.2`` will show all commits made between this release and the last. You can also see the issues closed since the last release `here <https://github.com/sympy/sympy/issues?utf8=%E2%9C%93&q=is%3Aissue%20closed%3A%222011-07-29%20..%202012-10-16%22>`_.
PypiClean
/DNASpiderWeb-1.1-py3-none-any.whl/dsw/biofilter.py
class DefaultBioFilter(object):

    def __init__(self, screen_name):
        """
        Initialize the default screen.

        :param screen_name: name of screen.
        :type screen_name: str
        """
        self.screen_name = screen_name

    def valid(self, dna_string):
        """
        Judge whether the DNA string meets the requirements.

        :param dna_string: DNA string to be judged.
        :type dna_string: str

        :raise: this interface needs to be implemented.

        :return: judgement.
        :rtype: bool
        """
        raise NotImplementedError("This interface \"def valid(dna_string)\" needs to be implemented.")


class LocalBioFilter(DefaultBioFilter):

    def __init__(self, observed_length, max_homopolymer_runs=None, gc_range=None, undesired_motifs=None):
        """
        Initialize the screen of local biochemical constraints.

        :param observed_length: length of the DNA sequence observed in the window.
        :type observed_length: int

        :param max_homopolymer_runs: maximum homopolymer runs.
        :type max_homopolymer_runs: int

        :param gc_range: range of GC content.
        :type gc_range: list

        :param undesired_motifs: undesired DNA motifs.
        :type undesired_motifs: list

        Example
            >>> from dsw import LocalBioFilter
            >>> bio_filter = LocalBioFilter(observed_length=8, \
                max_homopolymer_runs=2, gc_range=[0.4, 0.6], undesired_motifs=["GC"])
            >>> bio_filter.valid(dna_sequence="ACGTACGT")
            True
            >>> bio_filter.valid(dna_sequence="GCATGCAT")
            False
            >>> bio_filter.valid(dna_sequence="AAACCGGA")
            False

        .. notes::
            Reference [1] Nick Goldman et al. (2013) Nature

            Reference [2] Yaniv Erlich and Dina Zielinski (2017) Science

            Reference [3] William H. Press et al. (2020) Proceedings of the National Academy of Sciences

            Reference [4] Hannah F Lochel et al. (2021) Nucleic Acids Research

            If the maximum homopolymer runs (max_homopolymer_runs) is 1,
            "AA", "CC", "GG", "TT" cannot be included in the valid DNA sequences.

            If the range of GC content (gc_range) is [0.4, 0.6],
            the GC content of valid DNA sequences must between 40% and 60%.

            If "GC" in the undesired DNA motifs (undesired_motifs),
            "GC" cannot be included in the valid DNA sequences.
            This parameter could contain the restriction enzyme sites or some low compatibility DNA patterns.
        """
        super().__init__(screen_name="Local")
        # Every constraint is checked over a window of `observed_length`
        # nucleotides, so no constraint may need a longer context than that.
        if max_homopolymer_runs is not None:
            if observed_length < max_homopolymer_runs:
                raise ValueError("The parameter \"observed_length\" must "
                                 + "longer than the parameter \"max_homopolymer_runs\"!")
        if undesired_motifs is not None:
            for index, undesired_motif in enumerate(undesired_motifs):
                if len(undesired_motif) > observed_length:
                    raise ValueError("The parameter \"observed_length\" must "
                                     + "longer than the length of any motif in the parameter \"undesired_motifs\"!")

        self.observed_length = observed_length
        self.max_homopolymer_runs = max_homopolymer_runs
        self.gc_range = gc_range
        self.undesired_motifs = undesired_motifs

    def valid(self, dna_sequence, only_last=True):
        """
        Judge whether the DNA sequence meets the local biochemical constraints.

        :param dna_sequence: DNA sequence to be judged.
        :type dna_sequence: str

        :param only_last: only check the DNA sequence of the last observed window.
        :type only_last: bool

        :return: judgement.
        :rtype: bool

        .. note::
            "only_last" parameter is used to save time.

            For most tree-based coding algorithms, it is not necessary to
            detect the sub DNA sequences observed in each window from scratch every time.
        """
        if only_last:
            observed_dna_sequence = dna_sequence[-self.observed_length:]
        else:
            observed_dna_sequence = dna_sequence

        # Reject any character outside the canonical nucleotide alphabet.
        for nucleotide in observed_dna_sequence:
            if nucleotide not in "ACGT":
                return False

        # A run of (max + 1) identical nucleotides exceeds the allowed runs.
        if self.max_homopolymer_runs is not None:
            for nucleotide in "ACGT":
                if nucleotide * (1 + self.max_homopolymer_runs) in observed_dna_sequence:
                    return False

        # Reject each undesired motif and its reverse complement.
        if self.undesired_motifs is not None:
            for special in self.undesired_motifs:
                if special in observed_dna_sequence:
                    return False
                reverse_complement = special.replace("A", "t").replace("C", "g").replace("G", "c").replace("T", "a")
                reverse_complement = reverse_complement[::-1].upper()
                if reverse_complement in observed_dna_sequence:
                    return False

        if self.gc_range is not None:
            if len(observed_dna_sequence) >= self.observed_length:
                # Slide a full-size window and bound the GC count in each one.
                for index in range(len(observed_dna_sequence) - self.observed_length + 1):
                    sub_dna_sequence = observed_dna_sequence[index: index + self.observed_length]
                    gc_count = sub_dna_sequence.count("C") + sub_dna_sequence.count("G")
                    if gc_count > self.gc_range[1] * self.observed_length:
                        return False
                    if gc_count < self.gc_range[0] * self.observed_length:
                        return False
            else:
                # Partial window: only reject when the GC (or AT) count already
                # makes a compliant full window impossible.
                gc_count = observed_dna_sequence.count("C") + observed_dna_sequence.count("G")
                if gc_count > self.gc_range[1] * self.observed_length:
                    return False
                at_count = observed_dna_sequence.count("A") + observed_dna_sequence.count("T")
                if at_count > (1 - self.gc_range[0]) * self.observed_length:
                    return False

        return True

    def __str__(self):
        """Readable summary of the configured constraints.

        Fixed: the original indexed ``self.gc_range[0]`` unconditionally and
        raised TypeError whenever ``gc_range`` was None.
        """
        info = self.screen_name + "\n"
        info += "maximum homopolymer runs : " + str(self.max_homopolymer_runs) + "\n"
        if self.gc_range is not None:
            info += "local GC content range : " + str(self.gc_range[0]) + " <= GC <= " + str(self.gc_range[1]) + "\n"
        else:
            info += "local GC content range : None\n"
        info += "undesired DNA motifs : " + str(self.undesired_motifs).replace("\"", "") + "\n"
        return info
PypiClean
/JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/baselib/http_client/httplib2/iri2uri.py
__author__ = "Joe Gregorio (joe@bitworking.org)" __copyright__ = "Copyright 2006, Joe Gregorio" __contributors__ = [] __version__ = "1.0.0" __license__ = "MIT" __history__ = """ """ import urlparse # Convert an IRI to a URI following the rules in RFC 3987 # # The characters we need to enocde and escape are defined in the spec: # # iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD # ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF # / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD # / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD # / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD # / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD # / %xD0000-DFFFD / %xE1000-EFFFD escape_range = [ (0xA0, 0xD7FF ), (0xE000, 0xF8FF ), (0xF900, 0xFDCF ), (0xFDF0, 0xFFEF), (0x10000, 0x1FFFD ), (0x20000, 0x2FFFD ), (0x30000, 0x3FFFD), (0x40000, 0x4FFFD ), (0x50000, 0x5FFFD ), (0x60000, 0x6FFFD), (0x70000, 0x7FFFD ), (0x80000, 0x8FFFD ), (0x90000, 0x9FFFD), (0xA0000, 0xAFFFD ), (0xB0000, 0xBFFFD ), (0xC0000, 0xCFFFD), (0xD0000, 0xDFFFD ), (0xE1000, 0xEFFFD), (0xF0000, 0xFFFFD ), (0x100000, 0x10FFFD) ] def encode(c): retval = c i = ord(c) for low, high in escape_range: if i < low: break if i >= low and i <= high: retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')]) break return retval def iri2uri(uri): """Convert an IRI to a URI. Note that IRIs must be passed in a unicode strings. That is, do not utf-8 encode the IRI before passing it into the function.""" if isinstance(uri ,unicode): (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri) authority = authority.encode('idna') # For each character in 'ucschar' or 'iprivate' # 1. encode as utf-8 # 2. 
then %-encode each octet of that utf-8 uri = urlparse.urlunsplit((scheme, authority, path, query, fragment)) uri = "".join([encode(c) for c in uri]) return uri if __name__ == "__main__": import unittest class Test(unittest.TestCase): def test_uris(self): """Test that URIs are invariant under the transformation.""" invariant = [ u"ftp://ftp.is.co.za/rfc/rfc1808.txt", u"http://www.ietf.org/rfc/rfc2396.txt", u"ldap://[2001:db8::7]/c=GB?objectClass?one", u"mailto:John.Doe@example.com", u"news:comp.infosystems.www.servers.unix", u"tel:+1-816-555-1212", u"telnet://192.0.2.16:80/", u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ] for uri in invariant: self.assertEqual(uri, iri2uri(uri)) def test_iri(self): """ Test that the right type of escaping is done for each part of the URI.""" self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}")) self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}")) self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}")) self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}")) self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")) self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))) self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8'))) unittest.main()
PypiClean
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/nets/nasnet/nasnet_utils.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v1 as tf
import tf_slim as slim

arg_scope = slim.arg_scope

DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
# Sentinel default for data_format args; the asserts below force the caller
# (or an enclosing arg_scope) to supply a real value.
INVALID = 'null'
# The cap for tf.clip_by_value, it's hinted from the activation distribution
# that the majority of activation values are in the range [-6, 6].
CLIP_BY_VALUE_CAP = 6


def calc_reduction_layers(num_cells, num_reduction_layers):
  """Figure out what layers should have reductions."""
  # Reduction layers are spread evenly: pool k sits at the k-th fraction of
  # the cell stack (truncated to an int index).
  reduction_layers = []
  for pool_num in range(1, num_reduction_layers + 1):
    layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
    layer_num = int(layer_num)
    reduction_layers.append(layer_num)
  return reduction_layers


@slim.add_arg_scope
def get_channel_index(data_format=INVALID):
  """Return the channel axis (3 for NHWC, 1 otherwise)."""
  assert data_format != INVALID
  axis = 3 if data_format == 'NHWC' else 1
  return axis


@slim.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
  """Return the channel count of a rank-4 shape for the given data_format."""
  assert data_format != INVALID
  assert len(shape) == 4
  if data_format == 'NHWC':
    return int(shape[3])
  elif data_format == 'NCHW':
    return int(shape[1])
  else:
    raise ValueError('Not a valid data_format', data_format)


@slim.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
  """Average pool away the height and width spatial dimensions of x."""
  assert data_format != INVALID
  assert data_format in ['NHWC', 'NCHW']
  assert x.shape.ndims == 4
  if data_format == 'NHWC':
    return tf.reduce_mean(input_tensor=x, axis=[1, 2])
  else:
    return tf.reduce_mean(input_tensor=x, axis=[2, 3])


@slim.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
  """Reduces the shape of net without information loss due to striding."""
  assert data_format != INVALID
  if stride == 1:
    # No spatial reduction needed; just project to output_filters.
    net = slim.conv2d(net, output_filters, 1, scope='path_conv')
    net = slim.batch_norm(net, scope='path_bn')
    return net
  if data_format == 'NHWC':
    stride_spec = [1, stride, stride, 1]
  else:
    stride_spec = [1, 1, stride, stride]

  # Skip path 1
  path1 = tf.nn.avg_pool2d(
      net,
      ksize=[1, 1, 1, 1],
      strides=stride_spec,
      padding='VALID',
      data_format=data_format)
  path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')

  # Skip path 2
  # First pad with 0's on the right and bottom, then shift the filter to
  # include those 0's that were added.
  if data_format == 'NHWC':
    pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
    path2 = tf.pad(tensor=net, paddings=pad_arr)[:, 1:, 1:, :]
    concat_axis = 3
  else:
    pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
    path2 = tf.pad(tensor=net, paddings=pad_arr)[:, :, 1:, 1:]
    concat_axis = 1

  path2 = tf.nn.avg_pool2d(
      path2,
      ksize=[1, 1, 1, 1],
      strides=stride_spec,
      padding='VALID',
      data_format=data_format)

  # If odd number of filters, add an additional one to the second path.
  final_filter_size = int(output_filters / 2) + int(output_filters % 2)
  path2 = slim.conv2d(path2, final_filter_size, 1, scope='path2_conv')

  # Concat and apply BN
  final_path = tf.concat(values=[path1, path2], axis=concat_axis)
  final_path = slim.batch_norm(final_path, scope='final_path_bn')
  return final_path


@slim.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
  """Drops out a whole example hiddenstate with the specified probability."""
  if is_training:
    batch_size = tf.shape(input=net)[0]
    # One Bernoulli draw per example: floor(keep_prob + U[0,1)) is 1 with
    # probability keep_prob, 0 otherwise.
    noise_shape = [batch_size, 1, 1, 1]
    random_tensor = keep_prob
    random_tensor += tf.random.uniform(noise_shape, dtype=tf.float32)
    binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
    # Rescale by 1/keep_prob so the expected value is unchanged.
    keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype)
    net = net * keep_prob_inv * binary_tensor
  return net


def _operation_to_filter_shape(operation):
  """Extract the (square) filter size from names like 'separable_3x3_4'."""
  splitted_operation = operation.split('x')
  filter_shape = int(splitted_operation[0][-1])
  assert filter_shape == int(
      splitted_operation[1][0]), 'Rectangular filters not supported.'
  return filter_shape


def _operation_to_num_layers(operation):
  """Extract the trailing layer count from an op name (default 1)."""
  splitted_operation = operation.split('_')
  if 'x' in splitted_operation[-1]:
    # Name ends with the kernel spec (e.g. '3x3'), so no explicit count.
    return 1
  return int(splitted_operation[-1])


def _operation_to_info(operation):
  """Takes in operation name and returns meta information.

  An example would be 'separable_3x3_4' -> (3, 4).

  Args:
    operation: String that corresponds to convolution operation.

  Returns:
    Tuple of (filter shape, num layers).
  """
  num_layers = _operation_to_num_layers(operation)
  filter_shape = _operation_to_filter_shape(operation)
  return num_layers, filter_shape


def _stacked_separable_conv(net, stride, operation, filter_size,
                            use_bounded_activation):
  """Takes in an operations and parses it to the correct sep operation."""
  num_layers, kernel_size = _operation_to_info(operation)
  activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
  for layer_num in range(num_layers - 1):
    net = activation_fn(net)
    net = slim.separable_conv2d(
        net,
        filter_size,
        kernel_size,
        depth_multiplier=1,
        scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
        stride=stride)
    net = slim.batch_norm(
        net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
    # Only the first separable conv in the stack strides.
    stride = 1
  net = activation_fn(net)
  net = slim.separable_conv2d(
      net,
      filter_size,
      kernel_size,
      depth_multiplier=1,
      scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
      stride=stride)
  net = slim.batch_norm(
      net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
  return net


def _operation_to_pooling_type(operation):
  """Takes in the operation string and returns the pooling type."""
  splitted_operation = operation.split('_')
  return splitted_operation[0]


def _operation_to_pooling_shape(operation):
  """Takes in the operation string and returns the pooling kernel shape."""
  splitted_operation = operation.split('_')
  shape = splitted_operation[-1]
  assert 'x' in shape
  filter_height, filter_width = shape.split('x')
  assert filter_height == filter_width
  return int(filter_height)


def _operation_to_pooling_info(operation):
  """Parses the pooling operation string to return its type and shape."""
  pooling_type = _operation_to_pooling_type(operation)
  pooling_shape = _operation_to_pooling_shape(operation)
  return pooling_type, pooling_shape


def _pooling(net, stride, operation, use_bounded_activation):
  """Parses operation and performs the correct pooling operation on net."""
  padding = 'SAME'
  pooling_type, pooling_shape = _operation_to_pooling_info(operation)
  if use_bounded_activation:
    net = tf.nn.relu6(net)
  if pooling_type == 'avg':
    net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding=padding)
  elif pooling_type == 'max':
    net = slim.max_pool2d(net, pooling_shape, stride=stride, padding=padding)
  else:
    raise NotImplementedError('Unimplemented pooling type: ', pooling_type)
  return net


class NasNetABaseCell(object):
  """NASNet Cell class that is used as a 'layer' in image architectures.

  Args:
    num_conv_filters: The number of filters for each convolution operation.
    operations: List of operations that are performed in the NASNet Cell in
      order.
    used_hiddenstates: Binary array that signals if the hiddenstate was used
      within the cell. This is used to determine what outputs of the cell
      should be concatenated together.
    hiddenstate_indices: Determines what hiddenstates should be combined
      together with the specified operations to create the NASNet cell.
    use_bounded_activation: Whether or not to use bounded activations. Bounded
      activations better lend themselves to quantized inference.
""" def __init__(self, num_conv_filters, operations, used_hiddenstates, hiddenstate_indices, drop_path_keep_prob, total_num_cells, total_training_steps, use_bounded_activation=False): self._num_conv_filters = num_conv_filters self._operations = operations self._used_hiddenstates = used_hiddenstates self._hiddenstate_indices = hiddenstate_indices self._drop_path_keep_prob = drop_path_keep_prob self._total_num_cells = total_num_cells self._total_training_steps = total_training_steps self._use_bounded_activation = use_bounded_activation def _reduce_prev_layer(self, prev_layer, curr_layer): """Matches dimension of prev_layer to the curr_layer.""" # Set the prev layer to the current layer if it is none if prev_layer is None: return curr_layer curr_num_filters = self._filter_size prev_num_filters = get_channel_dim(prev_layer.shape) curr_filter_shape = int(curr_layer.shape[2]) prev_filter_shape = int(prev_layer.shape[2]) activation_fn = tf.nn.relu6 if self._use_bounded_activation else tf.nn.relu if curr_filter_shape != prev_filter_shape: prev_layer = activation_fn(prev_layer) prev_layer = factorized_reduction( prev_layer, curr_num_filters, stride=2) elif curr_num_filters != prev_num_filters: prev_layer = activation_fn(prev_layer) prev_layer = slim.conv2d( prev_layer, curr_num_filters, 1, scope='prev_1x1') prev_layer = slim.batch_norm(prev_layer, scope='prev_bn') return prev_layer def _cell_base(self, net, prev_layer): """Runs the beginning of the conv cell before the predicted ops are run.""" num_filters = self._filter_size # Check to be sure prev layer stuff is setup correctly prev_layer = self._reduce_prev_layer(prev_layer, net) net = tf.nn.relu6(net) if self._use_bounded_activation else tf.nn.relu(net) net = slim.conv2d(net, num_filters, 1, scope='1x1') net = slim.batch_norm(net, scope='beginning_bn') # num_or_size_splits=1 net = [net] net.append(prev_layer) return net def __call__(self, net, scope=None, filter_scaling=1, stride=1, prev_layer=None, cell_num=-1, 
current_step=None): """Runs the conv cell.""" self._cell_num = cell_num self._filter_scaling = filter_scaling self._filter_size = int(self._num_conv_filters * filter_scaling) i = 0 with tf.variable_scope(scope): net = self._cell_base(net, prev_layer) for iteration in range(5): with tf.variable_scope('comb_iter_{}'.format(iteration)): left_hiddenstate_idx, right_hiddenstate_idx = ( self._hiddenstate_indices[i], self._hiddenstate_indices[i + 1]) original_input_left = left_hiddenstate_idx < 2 original_input_right = right_hiddenstate_idx < 2 h1 = net[left_hiddenstate_idx] h2 = net[right_hiddenstate_idx] operation_left = self._operations[i] operation_right = self._operations[i+1] i += 2 # Apply conv operations with tf.variable_scope('left'): h1 = self._apply_conv_operation(h1, operation_left, stride, original_input_left, current_step) with tf.variable_scope('right'): h2 = self._apply_conv_operation(h2, operation_right, stride, original_input_right, current_step) # Combine hidden states using 'add'. 
with tf.variable_scope('combine'): h = h1 + h2 if self._use_bounded_activation: h = tf.nn.relu6(h) # Add hiddenstate to the list of hiddenstates we can choose from net.append(h) with tf.variable_scope('cell_output'): net = self._combine_unused_states(net) return net def _apply_conv_operation(self, net, operation, stride, is_from_original_input, current_step): """Applies the predicted conv operation to net.""" # Dont stride if this is not one of the original hiddenstates if stride > 1 and not is_from_original_input: stride = 1 input_filters = get_channel_dim(net.shape) filter_size = self._filter_size if 'separable' in operation: net = _stacked_separable_conv(net, stride, operation, filter_size, self._use_bounded_activation) if self._use_bounded_activation: net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP) elif operation in ['none']: if self._use_bounded_activation: net = tf.nn.relu6(net) # Check if a stride is needed, then use a strided 1x1 here if stride > 1 or (input_filters != filter_size): if not self._use_bounded_activation: net = tf.nn.relu(net) net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1') net = slim.batch_norm(net, scope='bn_1') if self._use_bounded_activation: net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP) elif 'pool' in operation: net = _pooling(net, stride, operation, self._use_bounded_activation) if input_filters != filter_size: net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1') net = slim.batch_norm(net, scope='bn_1') if self._use_bounded_activation: net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP) else: raise ValueError('Unimplemented operation', operation) if operation != 'none': net = self._apply_drop_path(net, current_step=current_step) return net def _combine_unused_states(self, net): """Concatenate the unused hidden states of the cell.""" used_hiddenstates = self._used_hiddenstates final_height = int(net[-1].shape[2]) final_num_filters = 
get_channel_dim(net[-1].shape) assert len(used_hiddenstates) == len(net) for idx, used_h in enumerate(used_hiddenstates): curr_height = int(net[idx].shape[2]) curr_num_filters = get_channel_dim(net[idx].shape) # Determine if a reduction should be applied to make the number of # filters match. should_reduce = final_num_filters != curr_num_filters should_reduce = (final_height != curr_height) or should_reduce should_reduce = should_reduce and not used_h if should_reduce: stride = 2 if final_height != curr_height else 1 with tf.variable_scope('reduction_{}'.format(idx)): net[idx] = factorized_reduction( net[idx], final_num_filters, stride) states_to_combine = ( [h for h, is_used in zip(net, used_hiddenstates) if not is_used]) # Return the concat of all the states concat_axis = get_channel_index() net = tf.concat(values=states_to_combine, axis=concat_axis) return net @slim.add_arg_scope # No public API. For internal use only. def _apply_drop_path(self, net, current_step=None, use_summaries=False, drop_connect_version='v3'): """Apply drop_path regularization. Args: net: the Tensor that gets drop_path regularization applied. current_step: a float32 Tensor with the current global_step value, to be divided by hparams.total_training_steps. Usually None, which defaults to tf.train.get_or_create_global_step() properly casted. use_summaries: a Python boolean. If set to False, no summaries are output. drop_connect_version: one of 'v1', 'v2', 'v3', controlling whether the dropout rate is scaled by current_step (v1), layer (v2), or both (v3, the default). Returns: The dropped-out value of `net`. 
""" drop_path_keep_prob = self._drop_path_keep_prob if drop_path_keep_prob < 1.0: assert drop_connect_version in ['v1', 'v2', 'v3'] if drop_connect_version in ['v2', 'v3']: # Scale keep prob by layer number assert self._cell_num != -1 # The added 2 is for the reduction cells num_cells = self._total_num_cells layer_ratio = (self._cell_num + 1)/float(num_cells) if use_summaries: with tf.device('/cpu:0'): tf.summary.scalar('layer_ratio', layer_ratio) drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob) if drop_connect_version in ['v1', 'v3']: # Decrease the keep probability over time if current_step is None: current_step = tf.train.get_or_create_global_step() current_step = tf.cast(current_step, tf.float32) drop_path_burn_in_steps = self._total_training_steps current_ratio = current_step / drop_path_burn_in_steps current_ratio = tf.minimum(1.0, current_ratio) if use_summaries: with tf.device('/cpu:0'): tf.summary.scalar('current_ratio', current_ratio) drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob)) if use_summaries: with tf.device('/cpu:0'): tf.summary.scalar('drop_path_keep_prob', drop_path_keep_prob) net = drop_path(net, drop_path_keep_prob) return net class NasNetANormalCell(NasNetABaseCell): """NASNetA Normal Cell.""" def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells, total_training_steps, use_bounded_activation=False): operations = ['separable_5x5_2', 'separable_3x3_2', 'separable_5x5_2', 'separable_3x3_2', 'avg_pool_3x3', 'none', 'avg_pool_3x3', 'avg_pool_3x3', 'separable_3x3_2', 'none'] used_hiddenstates = [1, 0, 0, 0, 0, 0, 0] hiddenstate_indices = [0, 1, 1, 1, 0, 1, 1, 1, 0, 0] super(NasNetANormalCell, self).__init__(num_conv_filters, operations, used_hiddenstates, hiddenstate_indices, drop_path_keep_prob, total_num_cells, total_training_steps, use_bounded_activation) class NasNetAReductionCell(NasNetABaseCell): """NASNetA Reduction Cell.""" def __init__(self, num_conv_filters, drop_path_keep_prob, 
total_num_cells, total_training_steps, use_bounded_activation=False): operations = ['separable_5x5_2', 'separable_7x7_2', 'max_pool_3x3', 'separable_7x7_2', 'avg_pool_3x3', 'separable_5x5_2', 'none', 'avg_pool_3x3', 'separable_3x3_2', 'max_pool_3x3'] used_hiddenstates = [1, 1, 1, 0, 0, 0, 0] hiddenstate_indices = [0, 1, 0, 1, 0, 1, 3, 2, 2, 0] super(NasNetAReductionCell, self).__init__(num_conv_filters, operations, used_hiddenstates, hiddenstate_indices, drop_path_keep_prob, total_num_cells, total_training_steps, use_bounded_activation)
PypiClean
/EggBasket-0.6.1b.tar.bz2/EggBasket-0.6.1b/README.txt
EggBasket ========= :Author: Christopher Arndt :Version: 0.6.1b :Date: 2008-07-04 :Description: A simple, lightweight Python Package Index (aka Cheeseshop) clone. .. contents:: :depth: 1 Overview -------- EggBasket_ is a web application which provides a service similar and compatible to the `Python Package Index`_ (aka Cheeseshop). It allows you to maintain your own local repository of Python packages required by your installations. It is implemented using the TurboGears_ web framework, Genshi_ and SQLAlchemy_. .. warning:: This is beta-stage software. All the basic operations necessary to support a setuptools-based infrastructure are there, but some convenience features are missing and the software has not been tested extensively. **Use at your own risk!** Features -------- * Can be used by setuptools/easy_install as the package index and repository. * Supports the distutils ``upload`` protocol. * Has a simple, role-based permission system to grant/deny access to the functions of the server (for example package uploads) to groups of users. * Requires only SQLite as the database system (included with Python 2.5). * Is able to read and display meta data from the following distribution package formats (source and binary): ``.egg``, ``.tar``, ``.tar.bz2``, ``.tar.gz``, ``.tgz``, ``.zip`` * Any other file format can be configured to be listed under the distribution files for a package (by default this includes ``.exe`` and ``.rpm`` and ``.tar.Z`` files in addition to the filetypes listed above). * Can be run without any configuration by just initializing the database and starting the server from within a directory containing package directories (see "Usage"). Todo ---- During beta phase: * Add support for MD5 check sums. * Add more error and sanity checks to the upload handling. * Add pagination to the main package list. Post 1.0 release: * Cache package listings and meta data. 
* Improve DBmechanic-based admin interface for adding users and groups and setting configuration values (currently disabled by default). * Add support for GPG signatures. Acknowledgments --------------- This application is a re-implementation (almost no shared code) of the haufe.eggserver_ Grok application with some improvements. Installation ------------ To install EggBasket_ from the Cheeseshop_ use `easy_install`_:: [sudo] easy_install EggBasket This requires the setuptools_ package to be installed. If you have not done so already, download the `ez_setup.py`_ script and run it to install setuptools. Usage ----- EggBasket server ~~~~~~~~~~~~~~~~ * Your packages should all reside under a common root directory, with a sub-directory for each package with the same base name as the distribution. The sub-directories should each contain the egg files and source archives for all available versions of the package. The package directories will be created by the application when using the upload command (see below). * Open a terminal, change to the directory which contains the packages and, if you are haven't already done so, initialize the database with:: eggbasket-server --init [<config file>] * Start the application server with:: eggbasket-server [<config file>] You can also set the location of the package root directory in the configuration with the ``eggbasket.package_root`` setting and start the server anywhere you want. If no configuration file is specified on the command line, the default configuration file included in the egg will be used. The default configuration file can also be found in the source distribution and be adapted for your environment. The server either needs write permissions in the directory where it is started, or you need to change the path of the database and the access log in the configuration so they can be written by the server. 
Of course, package uploads will also only work if the server has the permissions to create any missing package directories or write in existing ones. * To stop the server just hit ``Control-C`` in the terminal or kill the process. * You can look at the package index with your web browser by opening the URL ``http://localhost:3442/``. The default port ``3442`` can be changed by setting the ``server.socket_port`` option in the configuration file. Using EggBasket with ``distutils`` & ``easy_install`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * You can instruct easy_install_ to search & download packages from your package repository by specifying the URL to your server with the ``-i`` option. Example:: easy_install -i http://localhost:3442/ PACKAGE_NAME * Additionally, it might be necessary to restrict the hosts from which easy_install will download to your EggBasket server with the ``-H`` option. Example:: easy_install -H localhost:3442 -i http::/localhost:3442/ PACKAGE_NAME * You can also set the ``eggbasket.rewrite_download_url`` resp. ``eggbasket.rewrite_homepage_url`` settings in the configuration to ``True`` and EggBasket will replace the download resp. homepage URL of each package in the package meta data view with the URL of the package distribution files listing on the EggBasket server. * You can upload a package to your repository with the distutils ``upload`` command, for example:: python setup.py bdist_egg upload -r http://localhost:3442/upload This command will ask for your username and password on the server. You can store these and the repository URL in your ``.pypirc`` file. See the `distutils documentation`_ for more information. * Of course you can always just copy package distribution files manually in the filesystem to your repository or upload them to the appropriate place with ``scp`` etc. The application will find and list new files without the need to "register" them as is necessary with the original PyPI. 
Permissions ~~~~~~~~~~~ EggBasket uses a simple, role-based permission system to grant/restrict access to the functions of the server. Here is a list of the defined permissions and their meaning: * ``viewpkgs`` - User can view the list of all packages * ``viewfiles`` - User can view the list of distribution files for a package. * ``viewinfo`` - User can view the meta data for a package distribution file. * ``download`` - User can download a package distribution file. * ``upload`` - User can upload a package distribution file. * ``overwrite`` - User can overwrite and existing package distribution file. * ``delete`` - User can delete a package distribution file through the web interface. You can let EggBasket create an initial admin user, groups and permissions in the database by giving the ``--init`` option to the ``eggbasket-server`` command:: eggbasket-server --init [<config file>] This will create the following objects and relations in the database: * The above listed permissions. * The following groups (with permissions in brackets): * anonymous (viewpkgs, viewfiles, viewinfo, download) * authenticated (viewpkgs, viewfiles, viewinfo, download) * maintainer (upload, overwrite, delete) * admin * A user with user name/password "admin", belonging to the groups "maintainer" and "admin". The groups "anonymous" and "authenticated" are special groups to which all anonymous (i.e. not logged in) resp. all authenticated (logged in) users belong automatically. With the default permission setup, uploading through the server is restricted to users that are members of a group that has the "upload" permission. The configuration page can only be accessed by members of the "admin" group. Everything else can be accessed all users, whether authenticated or not. Please note that if you want to give a certain permission to all users, whether logged in or not, you need to give this permission to both the "anonymous" AND the "authenticated" group. 
This is what the standard permission setup already does for all permissions except "upload". See the TurboGears documentation on Identity_ for background information. .. _turbogears: http://www.turbogears.org/ .. _genshi: http://genshi.edgewall.org/ .. _sqlalchemy: http://www.sqlalchemy.org/ .. _haufe.eggserver: http://cheeseshop.python.org/pypi/haufe.eggserver .. _eggbasket: http://chrisarndt.de/projects/eggbasket/ .. _cheeseshop: .. _python package index: http://cheeseshop.python.org/pypi/ .. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools .. _easy_install: http://peak.telecommunity.com/DevCenter/EasyInstall .. _ez_setup.py: http://peak.telecommunity.com/dist/ez_setup.py .. _distutils documentation: http://docs.python.org/dist/package-upload.html .. _identity: http://docs.turbogears.org/1.0/GettingStartedWithIdentity .. include:: CHANGELOG.txt
PypiClean
/BuildStream-external-0.30.0.tar.gz/BuildStream-external-0.30.0/bst_external/elements/oci.py
"""BuildStream element plugin that assembles container images in OCI or
Docker (legacy) layout from staged dependency artifacts."""

import itertools
import stat
import os
import tempfile
import tarfile
import hashlib
import gzip
import json
import codecs
import shutil
import filecmp

from contextlib import contextmanager, ExitStack
from collections.abc import Mapping

from buildstream import Element, ElementError, Scope


class blob:
    """A content-addressed blob written into an image output directory.

    Depending on ``mode`` the blob is stored either under
    ``blobs/sha256/<digest>`` (OCI layout) or in the legacy Docker layout
    (``<digest>.json`` for JSON blobs, ``<digest>/layer.tar`` for layers).
    """

    def __init__(self, root, media_type=None, text=False, mode='oci', legacy_config=None):
        self.root = root
        self.descriptor = None      # filled in by create(): dict (OCI) or str (Docker)
        self.media_type = media_type
        self.text = text
        self.mode = mode
        self.filename = None        # final on-disk path, set by create()
        self.legacy_config = {}
        if legacy_config:
            self.legacy_config.update(legacy_config)
        self.legacy_id = None       # Docker legacy layer id (hex digest), layers only

    @contextmanager
    def create(self):
        """Context manager yielding a writable file object for the blob.

        On exit the content is hashed with SHA-256 and renamed into its
        content-addressed location; on error the temporary file is removed.
        """
        with tempfile.NamedTemporaryFile(mode='w+b', dir=self.root, delete=False) as f:
            filename = f.name
            try:
                if self.text:
                    # Wrap the binary temp file so callers can write str.
                    yield codecs.getwriter('utf-8')(f)
                else:
                    yield f
                self.descriptor = {}
                if self.media_type:
                    self.descriptor['mediaType'] = self.media_type
                # Determine size, then hash the full content.
                f.seek(0, 2)
                self.descriptor['size'] = f.tell()
                f.seek(0)
                h = hashlib.sha256()
                while True:
                    # Fixed: chunk size was 16*1204, an apparent typo for 16 KiB.
                    data = f.read(16*1024)
                    if len(data) == 0:
                        break
                    h.update(data)
                if self.mode == 'oci':
                    self.descriptor['digest'] = 'sha256:{}'.format(h.hexdigest())
                    os.makedirs(os.path.join(self.root, 'blobs', 'sha256'), exist_ok=True)
                    self.filename = os.path.join(self.root, 'blobs', 'sha256', h.hexdigest())
                else:
                    assert self.mode == 'docker'
                    if self.media_type.endswith('+json'):
                        self.filename = os.path.join(self.root, '{}.json'.format(h.hexdigest()))
                        # In Docker mode the descriptor is just the relative path.
                        self.descriptor = '{}.json'.format(h.hexdigest())
                    elif self.media_type.startswith('application/vnd.oci.image.layer.v1.tar'):
                        blobdir = os.path.join(self.root, h.hexdigest())
                        os.makedirs(blobdir)
                        self.filename = os.path.join(blobdir, 'layer.tar')
                        # Use distinct names so we don't shadow the temp file `f`.
                        with open(os.path.join(blobdir, 'VERSION'), 'w') as vf:
                            vf.write('1.0')
                        self.legacy_config['id'] = h.hexdigest()
                        self.legacy_id = h.hexdigest()
                        with open(os.path.join(blobdir, 'json'), 'w', encoding='utf-8') as jf:
                            json.dump(self.legacy_config, jf)
                        self.descriptor = os.path.join(h.hexdigest(), 'layer.tar')
                    else:
                        assert False
                os.rename(filename, self.filename)
            except:
                # Best-effort cleanup of the temp file, then re-raise.
                try:
                    os.unlink(filename)
                except OSError:
                    pass
                raise


def safe_path(path):
    """Normalize a path and strip any leading '/' so it stays relative."""
    norm = os.path.normpath(path)
    if os.path.isabs(norm):
        return os.path.relpath(norm, '/')
    else:
        return norm


class OciElement(Element):
    """Element producing an OCI image layout or a Docker 'save' tarball tree."""

    BST_ARTIFACT_VERSION = 1

    def configure(self, node):
        self.node_validate(node, [
            'mode', 'gzip', 'images', 'annotations'
        ])

        self.mode = self.node_get_member(node, str, 'mode', 'oci')
        if self.mode not in ['docker', 'oci']:
            raise ElementError('{}: Mode must be "oci" or "docker"'.format(self.node_provenance(node, 'mode')))

        # gzip defaults to on for OCI layouts, off for Docker layouts.
        self.gzip = self.node_get_member(node, bool, 'gzip', self.mode == 'oci')

        if 'annotations' not in node:
            self.annotations = None
        else:
            self.annotations = {}
            # Fixed: previously read from the 'images' key by mistake.
            annotations = self.node_get_member(node, Mapping, 'annotations')
            for k, _ in self.node_items(annotations):
                v = self.node_subst_member(annotations, k)
                self.annotations[k] = v

        self.images = []
        for image in self.node_get_member(node, list, 'images'):
            self.node_validate(image, [
                'parent', 'layer', 'architecture', 'variant', 'os', 'os.version',
                'os.features', 'author', 'comment', 'config', 'annotations'
            ] + (['tags'] if self.mode == 'docker' else []))

            parent = self.node_get_member(image, Mapping, 'parent', None)
            image_value = {}
            if parent:
                self.node_validate(parent, [
                    'element', 'image'
                ])
                parent = {
                    'element': self.node_get_member(parent, str, 'element'),
                    'image': self.node_get_member(parent, int, 'image', 0),
                }
                image_value['parent'] = parent
            if 'layer' in image:
                image_value['layer'] = self.node_subst_list(image, 'layer')
            image_value['architecture'] = \
                self.node_subst_member(image, 'architecture')
            if 'tags' in image:
                image_value['tags'] = \
                    self.node_subst_list(image, 'tags')
            image_value['os'] = self.node_subst_member(image, 'os')
            if 'os.version' in image:
                image_value['os.version'] = \
                    self.node_subst_member(image, 'os.version')
            if 'os.features' in image:
                image_value['os.features'] = \
                    self.node_subst_list(image, 'os.features')
            # Fixed: this was guarded by `'os.features' in image` (copy-paste),
            # which made `variant:` unusable on its own.
            if 'variant' in image:
                image_value['variant'] = \
                    self.node_subst_member(image, 'variant')
            if 'author' in image:
                image_value['author'] = \
                    self.node_subst_member(image, 'author')
            if 'comment' in image:
                image_value['comment'] = \
                    self.node_subst_member(image, 'comment')

            if 'config' in image:
                config = self.node_get_member(image, Mapping, 'config')

                common_config = [
                    'User', 'ExposedPorts', 'Env', 'Entrypoint',
                    'Cmd', 'Volumes', 'WorkingDir'
                ]
                docker_config = [
                    'Memory', 'MemorySwap', 'CpuShares', 'Healthcheck',
                ]
                oci_config = [
                    'Labels', 'StopSignal'
                ]
                self.node_validate(config, common_config +
                                   (docker_config if self.mode == 'docker' else oci_config))

                config_value = {}
                for member in ['User', 'WorkingDir', 'StopSignal']:
                    if member in config:
                        config_value[member] = \
                            self.node_subst_member(config, member)
                for member in ['Memory', 'MemorySwap', 'CpuShares']:
                    if member in config:
                        config_value[member] = \
                            int(self.node_subst_member(config, member))
                for member in ['ExposedPorts', 'Volumes', 'Env',
                               'Entrypoint', 'Cmd']:
                    if member in config:
                        config_value[member] = \
                            self.node_subst_list(config, member)
                if 'Labels' in config:
                    labels = self.node_get_member(config, Mapping, 'Labels')
                    config_value['Labels'] = {}
                    for k, v in self.node_items(labels):
                        config_value['Labels'][k] = v
                if 'Healthcheck' in config:
                    healthcheck = self.node_get_member(config, Mapping, 'Healthcheck')
                    self.node_validate(healthcheck, [
                        'Test', 'Interval', 'Timeout', 'Retries'
                    ])
                    config_value['Healthcheck'] = {}
                    if 'Test' in healthcheck:
                        config_value['Healthcheck']['Test'] = self.node_subst_list(healthcheck, 'Test')
                    for member in ['Interval', 'Timeout', 'Retries']:
                        if member in healthcheck:
                            config_value['Healthcheck'][member] = int(self.node_subst_member(healthcheck, member))
                image_value['config'] = config_value

            if 'annotations' in image:
                image_value['annotations'] = {}
                annotations = \
                    self.node_get_member(image, Mapping, 'annotations')
                for k, _ in self.node_items(annotations):
                    v = self.node_subst_member(annotations, k)
                    image_value['annotations'][k] = v

            self.images.append(image_value)

    def preflight(self):
        pass

    def get_unique_key(self):
        # NOTE(review): `mode` influences the produced artifact but is not part
        # of the cache key; changing it will not invalidate cached artifacts.
        # Adding it would change all existing keys — confirm before fixing.
        return {'annotations': self.annotations,
                'images': self.images,
                'gzip': self.gzip}

    def configure_sandbox(self, sandbox):
        pass

    def stage(self, sandbox):
        pass

    def _build_image(self, sandbox, image, root, output):
        """Build one image: import parent layers, build the new layer (if any),
        and write config/manifest blobs.

        Returns a (manifest, legacy_repositories) tuple: in Docker mode the
        manifest.json entry plus the legacy `repositories` mapping, in OCI
        mode the manifest descriptor and an empty dict.
        """
        parent = os.path.join(root, 'parent')
        parent_checkout = os.path.join(root, 'parent_checkout')
        if 'layer' in image:
            if os.path.exists(parent_checkout):
                shutil.rmtree(parent_checkout)
            os.makedirs(parent_checkout)

        layer_descs = []    # descriptors of all layers, oldest first
        layer_files = []    # on-disk filenames matching layer_descs
        diff_ids = []       # uncompressed-layer digests for the image config
        history = None
        legacy_parent = None

        config = {}
        config['created'] = '2011-11-11T11:11:11Z'
        if 'author' in image:
            config['author'] = image['author']
        config['architecture'] = image['architecture']
        config['os'] = image['os']
        if 'config' in image:
            config['config'] = {}
            for k, v in image['config'].items():
                if k in ['ExposedPorts', 'Volumes']:
                    # These are JSON objects with empty values, not lists.
                    config['config'][k] = {}
                    for value in v:
                        config['config'][k][value] = {}
                else:
                    config['config'][k] = v

        if 'parent' in image:
            if os.path.exists(parent):
                shutil.rmtree(parent)
            parent_dep = self.search(Scope.BUILD, image['parent']['element'])
            if not parent_dep:
                raise ElementError('{}: Element not in dependencies: {}'.format(self, image['parent']['element']))
            parent_dep.stage_dependency_artifacts(sandbox, Scope.RUN, path='parent')

            if not os.path.exists(os.path.join(parent, 'index.json')):
                # Parent is in Docker 'save' layout (manifest.json).
                with open(os.path.join(parent, 'manifest.json'), 'r', encoding='utf-8') as f:
                    parent_index = json.load(f)
                parent_image = parent_index[image['parent']['image']]
                layers = parent_image['Layers']
                with open(os.path.join(parent, safe_path(parent_image['Config'])), 'r', encoding='utf-8') as f:
                    image_config = json.load(f)
                diff_ids = image_config['rootfs']['diff_ids']
                if 'history' in image_config:
                    history = image_config['history']
                for i, layer in enumerate(layers):
                    _, diff_id = diff_ids[i].split(':', 1)
                    with open(os.path.join(parent, safe_path(layer)), 'rb') as origblob:
                        if self.gzip:
                            targz_blob = blob(output, media_type='application/vnd.oci.image.layer.v1.tar+gzip', mode=self.mode)
                            with targz_blob.create() as gzipfile:
                                with gzip.GzipFile(filename=diff_id, fileobj=gzipfile, mode='wb', mtime=1320937200) as gz:
                                    shutil.copyfileobj(origblob, gz)
                            layer_descs.append(targz_blob.descriptor)
                            layer_files.append(targz_blob.filename)
                            # Fixed: referenced the unbound name `tar_blob`
                            # here (NameError at runtime in gzip mode).
                            legacy_parent = targz_blob.legacy_id
                        else:
                            legacy_config = {
                                'os': image['os']
                            }
                            if legacy_parent:
                                legacy_config['parent'] = legacy_parent
                            # NOTE(review): legacy_config is built but not
                            # passed to blob() here, unlike the OCI-parent
                            # branch below — confirm whether this is a bug.
                            tar_blob = blob(output, media_type='application/vnd.oci.image.layer.v1.tar', mode=self.mode)
                            with tar_blob.create() as newfile:
                                shutil.copyfileobj(origblob, newfile)
                            layer_descs.append(tar_blob.descriptor)
                            layer_files.append(tar_blob.filename)
                            legacy_parent = tar_blob.legacy_id
            else:
                # Parent is in OCI layout (index.json + blobs/).
                with open(os.path.join(parent, 'index.json'), 'r', encoding='utf-8') as f:
                    parent_index = json.load(f)
                parent_image_desc = \
                    parent_index['manifests'][image['parent']['image']]
                algo, h = parent_image_desc['digest'].split(':', 1)
                with open(os.path.join(parent, 'blobs', safe_path(algo), safe_path(h)), 'r', encoding='utf-8') as f:
                    image_manifest = json.load(f)
                algo, h = image_manifest['config']['digest'].split(':', 1)
                with open(os.path.join(parent, 'blobs', safe_path(algo), safe_path(h)), 'r', encoding='utf-8') as f:
                    image_config = json.load(f)
                diff_ids = image_config['rootfs']['diff_ids']
                if 'history' in image_config:
                    history = image_config['history']
                for i, layer in enumerate(image_manifest['layers']):
                    _, diff_id = diff_ids[i].split(':', 1)
                    algo, h = layer['digest'].split(':', 1)
                    origfile = os.path.join(parent, 'blobs', safe_path(algo), safe_path(h))
                    with ExitStack() as e:
                        if 'layer' not in image and i+1 == len(image_manifest['layers']):
                            # The case were we do not add a layer, the last
                            # imported layer has to be fully reconfigured.
                            legacy_config = {}
                            legacy_config.update(config)
                            if legacy_parent:
                                legacy_config['parent'] = legacy_parent
                        else:
                            legacy_config = {
                                'os': image['os']
                            }
                            if legacy_parent:
                                legacy_config['parent'] = legacy_parent
                        if self.gzip:
                            output_blob = blob(output, media_type='application/vnd.oci.image.layer.v1.tar+gzip', mode=self.mode)
                        else:
                            output_blob = blob(output, media_type='application/vnd.oci.image.layer.v1.tar', mode=self.mode, legacy_config=legacy_config)
                        outp = e.enter_context(output_blob.create())
                        inp = e.enter_context(open(origfile, 'rb'))
                        # Recompress (or decompress) only when the parent's
                        # compression does not match our gzip setting.
                        if layer['mediaType'].endswith('+gzip'):
                            if self.gzip:
                                shutil.copyfileobj(inp, outp)
                            else:
                                gz = e.enter_context(gzip.open(filename=inp, mode='rb'))
                                shutil.copyfileobj(gz, outp)
                        else:
                            if self.gzip:
                                gz = e.enter_context(gzip.GzipFile(filename=diff_id, fileobj=outp, mode='wb', mtime=1320937200))
                                shutil.copyfileobj(inp, gz)
                            else:
                                shutil.copyfileobj(inp, outp)
                        layer_descs.append(output_blob.descriptor)
                        layer_files.append(output_blob.filename)
                        legacy_parent = output_blob.legacy_id

        if 'parent' in image and 'layer' in image:
            # We need the parent rootfs unpacked so the new layer can be
            # expressed as a delta against it.
            unpacked = False
            if isinstance(parent_dep, OciElement):
                # Here we read the parent configuration to checkout
                # the artifact which is much faster than unpacking the tar
                # files.
                layers = []
                parent_image = image['parent']['image']
                for layer in parent_dep.images[parent_image]['layer']:
                    layer_dep = parent_dep.search(Scope.BUILD, layer)
                    if not layer_dep:
                        raise ElementError('{}: Element not in dependencies: {}'.format(parent_dep, layer))

                    # We need to verify dependencies. If not in current
                    # element's dependencies, then we cannnot safely assume
                    # it is cached. Parent could be cached while its
                    # dependencies either removed or not pulled.
                    if layer_dep != self.search(Scope.BUILD, layer):
                        self.warn('In order to optimize building of {}, you should add {} as build dependency'.format(self.name, layer))
                        layers = None
                        break
                    else:
                        layers.append(layer_dep)
                if layers is not None:
                    with self.timed_activity('Checking out layer from {}'.format(parent_dep.name)):
                        for layer_dep in layers:
                            layer_dep.stage_dependency_artifacts(sandbox, Scope.RUN, path='parent_checkout')
                    unpacked = True

            if not unpacked:
                # Fall back to unpacking the parent's layer tarballs,
                # honouring whiteout files along the way.
                for layer in layer_files:
                    if self.gzip:
                        mode = 'r:gz'
                    else:
                        mode = 'r:'
                    with self.timed_activity('Decompressing layer {}'.format(layer)):
                        with tarfile.open(layer, mode=mode) as t:
                            members = []
                            for info in t.getmembers():
                                # Skip path-traversal entries.
                                if '/../' in info.name:
                                    continue
                                if info.name.startswith('../'):
                                    continue

                                dirname, basename = os.path.split(info.name)
                                if basename == '.wh..wh..opq':
                                    # Opaque whiteout: clear the directory.
                                    for entry in os.listdir(os.path.join(parent_checkout, dirname)):
                                        full_entry = os.path.join(parent_checkout, dirname, entry)
                                        if os.path.islink(full_entry) or not os.path.isdir(full_entry):
                                            os.unlink(full_entry)
                                        else:
                                            shutil.rmtree(full_entry)
                                elif basename.startswith('.wh.'):
                                    # Plain whiteout: remove the named entry.
                                    full_entry = os.path.join(parent_checkout, dirname, basename[4:])
                                    if os.path.islink(full_entry) or not os.path.isdir(full_entry):
                                        os.unlink(full_entry)
                                    else:
                                        shutil.rmtree(full_entry)
                                else:
                                    members.append(info)
                            t.extractall(path=parent_checkout, members=members)

        legacy_config = {}
        legacy_config.update(config)
        if legacy_parent:
            legacy_config['parent'] = legacy_parent

        if 'layer' in image:
            for name in image['layer']:
                dep = self.search(Scope.BUILD, name)
                dep.stage_dependency_artifacts(sandbox, Scope.RUN, path='layer')

            layer = os.path.join(root, 'layer')

            with self.timed_activity('Transforming into layer'):
                # Create whiteouts for entries present in the parent but
                # absent from the new layer. (Walk variables renamed so they
                # no longer shadow the `root` parameter or the loop file.)
                for wroot, wdirs, wfiles in os.walk(parent_checkout):
                    for entry in itertools.chain(wfiles, wdirs):
                        rel = os.path.relpath(os.path.join(wroot, entry), parent_checkout)
                        if not os.path.lexists(os.path.join(layer, rel)) \
                                and os.path.lexists(os.path.dirname(os.path.join(layer, rel))):
                            whfile = os.path.join(layer, os.path.relpath(wroot, parent_checkout), '.wh.' + entry)
                            with open(whfile, 'w'):
                                pass

                if 'parent' in image:
                    # Drop files identical to the parent's so the layer only
                    # carries real changes.
                    for wroot, _, wfiles in os.walk(layer):
                        for fname in wfiles:
                            new = os.path.join(wroot, fname)
                            rel = os.path.relpath(new, layer)
                            old = os.path.join(parent_checkout, rel)
                            if os.path.lexists(old):
                                old_st = os.lstat(old)
                                new_st = os.lstat(new)
                                if old_st.st_mode != new_st.st_mode:
                                    continue
                                if int(old_st.st_mtime) != int(new_st.st_mtime):
                                    continue
                                if stat.S_ISLNK(old_st.st_mode):
                                    if os.readlink(old) == os.readlink(new):
                                        os.unlink(new)
                                else:
                                    if filecmp.cmp(new, old):
                                        os.unlink(new)

            with tempfile.TemporaryFile(mode='w+b') as tfile:
                with tarfile.open(fileobj=tfile, mode='w:') as t:
                    with self.timed_activity('Building layer tar'):
                        for wroot, wdirs, wfiles in os.walk(layer):
                            # Sort in place so os.walk traverses deterministically.
                            wdirs.sort()
                            for entry in itertools.chain(sorted(wfiles), wdirs):
                                path = os.path.join(wroot, entry)
                                arcname = os.path.relpath(path, layer)
                                st = os.lstat(path)
                                tinfo = tarfile.TarInfo(name=arcname)
                                # Normalize ownership for reproducibility.
                                tinfo.uid = 0
                                tinfo.gid = 0
                                tinfo.mode = stat.S_IMODE(st.st_mode)
                                tinfo.mtime = st.st_mtime
                                if stat.S_ISDIR(st.st_mode):
                                    tinfo.type = tarfile.DIRTYPE
                                    t.addfile(tinfo, None)
                                elif stat.S_ISREG(st.st_mode):
                                    tinfo.type = tarfile.REGTYPE
                                    tinfo.size = st.st_size
                                    with open(path, 'rb') as fd:
                                        t.addfile(tinfo, fd)
                                elif stat.S_ISLNK(st.st_mode):
                                    tinfo.type = tarfile.SYMTYPE
                                    tinfo.linkname = os.readlink(path)
                                    t.addfile(tinfo, None)
                                else:
                                    raise ElementError('{}: Unexpected file type for: {}'.format(self, arcname))

                tfile.seek(0)
                tar_hash = hashlib.sha256()
                with self.timed_activity('Hashing layer'):
                    while True:
                        data = tfile.read(16*1024)
                        if len(data) == 0:
                            break
                        tar_hash.update(data)
                tfile.seek(0)

                if self.gzip:
                    targz_blob = blob(output, media_type='application/vnd.oci.image.layer.v1.tar+gzip', mode=self.mode)
                    with self.timed_activity('Compressing layer'):
                        with targz_blob.create() as gzipfile:
                            with gzip.GzipFile(filename=tar_hash.hexdigest(), fileobj=gzipfile, mode='wb', mtime=1320937200) as gz:
                                shutil.copyfileobj(tfile, gz)
                    layer_descs.append(targz_blob.descriptor)
                else:
                    copied_blob = blob(output, media_type='application/vnd.oci.image.layer.v1.tar', mode=self.mode, legacy_config=legacy_config)
                    with copied_blob.create() as copiedfile:
                        shutil.copyfileobj(tfile, copiedfile)
                    layer_descs.append(copied_blob.descriptor)
                    legacy_parent = copied_blob.legacy_id
                diff_ids.append('sha256:{}'.format(tar_hash.hexdigest()))

        if not history:
            history = []
        hist_entry = {}
        if 'layer' not in image:
            hist_entry['empty_layer'] = True
        if 'author' in image:
            hist_entry['author'] = image['author']
        if 'comment' in image:
            hist_entry['comment'] = image['comment']
        history.append(hist_entry)

        config['rootfs'] = {'type': 'layers',
                            'diff_ids': diff_ids}
        config['history'] = history
        config_blob = blob(output, media_type='application/vnd.oci.image.config.v1+json', text=True, mode=self.mode)
        with config_blob.create() as configfile:
            json.dump(config, configfile)

        if self.mode == 'docker':
            manifest = {
                'Config': config_blob.descriptor,
                'Layers': layer_descs
            }
            legacy_repositories = {}
            if 'tags' in image:
                manifest['RepoTags'] = image['tags']
                for tag in image['tags']:
                    name, version = tag.split(':', 1)
                    if name not in legacy_repositories:
                        legacy_repositories[name] = {}
                    legacy_repositories[name][version] = legacy_parent
            return manifest, legacy_repositories
        else:
            manifest = {
                'schemaVersion': 2
            }
            manifest['layers'] = layer_descs
            manifest['config'] = config_blob.descriptor
            if 'annotations' in image:
                manifest['annotations'] = image['annotations']
            manifest_blob = blob(output, media_type='application/vnd.oci.image.manifest.v1+json', text=True)
            with manifest_blob.create() as manifestfile:
                json.dump(manifest, manifestfile)
            platform = {
                'os': image['os'],
                'architecture': image['architecture']
            }
            if 'os.version' in image:
                platform['os.version'] = image['os.version']
            if 'os.features' in image:
                platform['os.features'] = image['os.features']
            if 'variant' in image:
                platform['variant'] = image['variant']
            manifest_blob.descriptor['platform'] = platform
            return manifest_blob.descriptor, {}

    def assemble(self, sandbox):
        """Build every configured image and write the top-level layout files."""
        root = sandbox.get_directory()
        output = os.path.join(root, 'output')
        os.makedirs(output)
        manifests = []
        legacy_repositories = {}
        image_counter = 1
        for image in self.images:
            with self.timed_activity('Creating image {}'.format(image_counter)):
                manifest, legacy_repositories_part = self._build_image(sandbox, image, root, output)
                manifests.append(manifest)
                legacy_repositories.update(legacy_repositories_part)
            image_counter += 1

        if self.mode == 'docker':
            with open(os.path.join(output, 'manifest.json'), 'w', encoding='utf-8') as f:
                json.dump(manifests, f)
            with open(os.path.join(output, 'repositories'), 'w', encoding='utf-8') as f:
                json.dump(legacy_repositories, f)
        else:
            index = {
                'schemaVersion': 2
            }
            index['manifests'] = manifests
            if self.annotations:
                index['annotations'] = self.annotations
            with open(os.path.join(output, 'index.json'), 'w', encoding='utf-8') as f:
                json.dump(index, f)
            oci_layout = {
                'imageLayoutVersion': '1.0.0'
            }
            with open(os.path.join(output, 'oci-layout'), 'w', encoding='utf-8') as f:
                json.dump(oci_layout, f)

        return 'output'


def setup():
    # BuildStream plugin entry point.
    return OciElement
PypiClean
/MagnetiCalc-1.15.2.tar.gz/MagnetiCalc-1.15.2/magneticalc/API.py
# ISC License
#
# Copyright (c) 2020–2022, Paul Wilhelm, M. Sc. <anfrage@paulwilhelm.de>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import h5py
import numpy as np
from typing import Dict, List, Union
from magneticalc.MagnetiCalc_Data import MagnetiCalc_Data


class API:
    """ API class: file import/export helpers for wire points and HDF5 containers. """

    @staticmethod
    def import_wire(filename: str) -> np.ndarray:
        """
        Imports wire points from a TXT file.

        @param filename: Filename
        @return: NumPy array of 3D points
        """
        points = np.loadtxt(filename)
        assert points.shape[1] == 3, "Expecting array of 3D points"
        return points

    @staticmethod
    def export_wire(filename: str, data: Union[List, np.ndarray]) -> None:
        """
        Exports wire points to a TXT file.

        @param filename: Filename
        @param data: NumPy array of 3D points
        """
        points = np.array(data)
        assert points.shape[1] == 3, "Expecting array of 3D points"
        np.savetxt(
            filename,
            points  # type: ignore
        )

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @staticmethod
    def import_hdf5(filename: str) -> MagnetiCalc_Data:
        """
        Imports data from an HDF5 container.

        Opens an HDF5 file and recursively converts every group and subgroup
        into a dictionary, with group keys as keys and datasets as items.

        @param filename: Filename
        @return: MagnetiCalc_Data object (can be accessed like a dictionary)
        """
        container = h5py.File(filename, "r")
        contents: Dict = {}
        API._hdf5_group_to_dict(container, contents)
        container.close()
        return MagnetiCalc_Data(contents)

    @staticmethod
    def export_hdf5(filename: str, data: Union[Dict, MagnetiCalc_Data]) -> None:
        """
        Exports data to an HDF5 container.

        Takes a dictionary and writes an HDF5 file, using keys as keys and
        items as groups (if they are dictionaries) or as datasets (otherwise).

        @param filename: Filename
        @param data: Dictionary or MagnetiCalc_Data object
        """
        if isinstance(data, MagnetiCalc_Data):
            dictionary = data.dictionary
        else:
            dictionary = data
        container = h5py.File(filename, "w")
        API._dict_to_hdf5_group(container, dictionary)
        container.close()

    # ------------------------------------------------------------------------------------------------------------------

    @staticmethod
    def _dict_to_hdf5_group(hdf5_group: h5py.Group, dictionary: Dict) -> None:
        """
        Recursively transforms a dictionary into an HDF5 group (in-place).

        @param hdf5_group: HDF5 group
        @param dictionary: Dictionary
        """
        for key, item in dictionary.items():
            if isinstance(item, dict):
                # Nested dictionaries become nested HDF5 groups.
                API._dict_to_hdf5_group(hdf5_group.create_group(key), item)
            else:
                hdf5_group[key] = item

    @staticmethod
    def _hdf5_group_to_dict(hdf5_group: h5py.Group, dictionary: Dict) -> None:
        """
        Recursively transforms an HDF5 group into a dictionary (in-place).

        @param hdf5_group: HDF5 group
        @param dictionary: Dictionary
        """
        for key in hdf5_group.keys():
            node = hdf5_group[key]
            if isinstance(node, h5py.Dataset):
                dictionary[key] = node[()]
            else:
                # Sub-groups become nested dictionaries.
                dictionary[key] = {}
                API._hdf5_group_to_dict(node, dictionary[key])
/Cohen-0.7.4.tar.gz/Cohen-0.7.4/coherence/upnp/services/servers/media_receiver_registrar_server.py
# Copyright 2006, Frank Scholz <coherence@beebits.net>

# X_MS_MediaReceiverRegistrar service, required by the Xbox 360 /
# Windows Media Connect family of UPnP clients.

from twisted.web import resource

from coherence.upnp.core.soap_service import UPnPPublisher
from coherence.upnp.core import service


class FakeMediaReceiverRegistrarBackend:
    """Permissive backend that authorizes and validates every client."""

    def upnp_IsAuthorized(self, *args, **kwargs):
        # Always report the requesting device as authorized.
        r = {'Result': 1}
        return r

    def upnp_IsValidated(self, *args, **kwargs):
        # Always report the requesting device as validated.
        r = {'Result': 1}
        return r

    def upnp_RegisterDevice(self, *args, **kwargs):
        """ in parameter RegistrationReqMsg """
        RegistrationReqMsg = kwargs['RegistrationReqMsg']
        # FIXME: check with WMC and WMP
        r = {'RegistrationRespMsg': 'WTF should be in here?'}
        return r


class MediaReceiverRegistrarControl(service.ServiceControl, UPnPPublisher):
    """SOAP control endpoint for the MediaReceiverRegistrar service."""

    def __init__(self, server):
        service.ServiceControl.__init__(self)
        UPnPPublisher.__init__(self)
        self.service = server
        self.variables = server.get_variables()
        self.actions = server.get_actions()


class MediaReceiverRegistrarServer(service.ServiceServer, resource.Resource):
    """Publishes the X_MS_MediaReceiverRegistrar service description and control URLs."""

    implementation = 'optional'

    def __init__(self, device, backend=None):
        self.device = device
        # Fixed: identity comparison with None ('is None') instead of '=='.
        if backend is None:
            backend = self.device.backend
        resource.Resource.__init__(self)
        self.version = 1
        self.namespace = 'microsoft.com'
        self.id_namespace = 'microsoft.com'
        service.ServiceServer.__init__(self, 'X_MS_MediaReceiverRegistrar',
                                       self.version, backend)
        self.device_description_tmpl = 'xbox-description-1.xml'
        self.control = MediaReceiverRegistrarControl(self)
        self.putChild('scpd.xml', service.scpdXML(self, self.control))
        self.putChild('control', self.control)

    def listchilds(self, uri):
        # Build one <li> link per registered child resource.
        cl = ''
        for c in self.children:
            cl += '<li><a href=%s/%s>%s</a></li>' % (uri, c, c)
        return cl

    def render(self, request):
        # Minimal HTML index page listing the service's child resources.
        return '<html><p>root of the MediaReceiverRegistrar</p><p><ul>%s</ul></p></html>' % self.listchilds(request.uri)
PypiClean
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/accounts/migrations/0001_squashed_0019_auto_20200403_2004.py
# Squashed Django migration (accounts 0001..0019): creates Profile, VerifiedEmail, Subscription and AuditLog. Auto-generated, frozen history — do not edit by hand.
import django.db.models.deletion from django.conf import settings from django.db import migrations, models import weblate.utils.fields import weblate.utils.render class Migration(migrations.Migration): replaces = [ ("accounts", "0001_squashed_0037_auto_20180416_1406"), ("accounts", "0002_profile_uploaded"), ("accounts", "0003_profile_translate_mode"), ("accounts", "0004_create_profile"), ("accounts", "0005_auto_20190331_2126"), ("accounts", "0006_subscriptions"), ("accounts", "0007_auto_20190411_0807"), ("accounts", "0008_auto_20190426_0941"), ("accounts", "0009_profile_zen_mode"), ("accounts", "0010_auto_20190516_1153"), ("accounts", "0011_auto_20190721_1810"), ("accounts", "0012_auto_20190805_1248"), ("accounts", "0013_auto_20190916_1203"), ("accounts", "0014_auto_20190922_1947"), ("accounts", "0015_auto_20190922_1948"), ("accounts", "0016_auto_20191115_2020"), ("accounts", "0017_auto_20200318_1014"), ("accounts", "0018_announcement_rename"), ("accounts", "0019_auto_20200403_2004"), ] initial = True dependencies = [ ("trans", "0024_resolve_auto_format"), ("lang", "0001_squashed_0011_auto_20180215_1158"), ("trans", "0001_squashed_0143_auto_20180609_1655"), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ("social_django", "0001_initial"), ] operations = [ migrations.CreateModel( name="Profile", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "language", models.CharField( blank=True, choices=settings.LANGUAGES, max_length=10, verbose_name="Interface Language", ), ), ("suggested", models.IntegerField(db_index=True, default=0)), ("translated", models.IntegerField(db_index=True, default=0)), ( "languages", models.ManyToManyField( blank=True, help_text="Choose the languages you can translate to. 
These will be offered to you on the dashboard for easier access to your chosen translations.", to="lang.Language", verbose_name="Translated languages", ), ), ( "secondary_languages", models.ManyToManyField( blank=True, help_text="Choose languages you can understand, strings in those languages will be shown in addition to the source string.", related_name="secondary_profile_set", to="lang.Language", verbose_name="Secondary languages", ), ), ( "watched", models.ManyToManyField( blank=True, help_text="You can receive notifications for watched projects and they are shown on the dashboard by default.", to="trans.Project", verbose_name="Watched projects", ), ), ( "user", models.OneToOneField( editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, ), ), ( "hide_completed", models.BooleanField( default=False, verbose_name="Hide completed translations on the dashboard", ), ), ( "secondary_in_zen", models.BooleanField( default=True, verbose_name="Show secondary translations in the Zen mode", ), ), ( "hide_source_secondary", models.BooleanField( default=False, verbose_name="Hide source if a secondary translation exists", ), ), ( "dashboard_component_list", models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to="trans.ComponentList", verbose_name="Default component list", ), ), ( "dashboard_view", models.IntegerField( choices=[ (1, "Watched translations"), (6, "Component lists"), (4, "Component list"), (5, "Suggested translations"), ], default=1, verbose_name="Default dashboard view", ), ), ( "editor_link", models.CharField( blank=True, default="", help_text="Enter a custom URL to be used as link to the source code. 
You can use {{branch}} for branch, {(unknown)} and {{line}} as filename and line placeholders.", max_length=200, validators=[weblate.utils.render.validate_editor], verbose_name="Editor link", ), ), ( "special_chars", models.CharField( blank=True, default="", help_text="You can specify additional special visual keyboard characters to be shown while translating. It can be useful for characters you use frequently, but are hard to type on your keyboard.", max_length=30, verbose_name="Special characters", ), ), ("uploaded", models.IntegerField(db_index=True, default=0)), ( "translate_mode", models.IntegerField( choices=[(0, "Full editor"), (1, "Zen mode")], default=0, verbose_name="Translation editor mode", ), ), ( "zen_mode", models.IntegerField( choices=[(0, "Top to bottom"), (1, "Side by side")], default=0, verbose_name="Zen editor mode", ), ), ], ), migrations.CreateModel( name="VerifiedEmail", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("email", models.EmailField(max_length=254)), ( "social", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="social_django.UserSocialAuth", ), ), ], ), migrations.CreateModel( name="Subscription", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "notification", models.CharField( choices=[ ("MergeFailureNotification", "Repository failure"), ("RepositoryNotification", "Repository operation"), ("ParseErrorNotification", "Parse error"), ("NewStringNotificaton", "New string"), ("NewContributorNotificaton", "New contributor"), ("NewSuggestionNotificaton", "New suggestion"), ( "LastAuthorCommentNotificaton", "Comment on own translation", ), ("MentionCommentNotificaton", "Mentioned in comment"), ("NewCommentNotificaton", "New comment"), ("ChangedStringNotificaton", "Changed string"), ("NewTranslationNotificaton", "New language"), ("NewComponentNotificaton", "New translation component"), 
("NewAnnouncementNotificaton", "New announcement"), ("NewAlertNotificaton", "New alert"), ("PendingSuggestionsNotification", "Pending suggestions"), ("ToDoStringsNotification", "Unfinished strings"), ], max_length=100, ), ), ( "scope", models.IntegerField( choices=[ (10, "Defaults"), (20, "Admin"), (30, "Project"), (40, "Component"), ] ), ), ( "frequency", models.IntegerField( choices=[ (0, "Do not notify"), (1, "Instant notification"), (2, "Daily digest"), (3, "Weekly digest"), (4, "Monthly digest"), ] ), ), ( "component", models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="trans.Component", ), ), ( "project", models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="trans.Project", ), ), ( "user", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, ), ), ], options={ "unique_together": { ("notification", "scope", "project", "component", "user") }, }, ), migrations.CreateModel( name="AuditLog", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "activity", models.CharField( choices=[ ("auth-connect", "auth-connect"), ("auth-disconnect", "auth-disconnect"), ("connect", "connect"), ("email", "email"), ("failed-auth", "failed-auth"), ("full_name", "full_name"), ("invited", "invited"), ("locked", "locked"), ("login", "login"), ("login-new", "login-new"), ("password", "password"), ("register", "register"), ("removed", "removed"), ("reset", "reset"), ("reset-request", "reset-request"), ("tos", "tos"), ("username", "username"), ], db_index=True, max_length=20, ), ), ("params", weblate.utils.fields.JSONField(default={})), ("address", models.GenericIPAddressField(null=True)), ("timestamp", models.DateTimeField(auto_now_add=True, db_index=True)), ( "user", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, ), ), ("user_agent", models.CharField(default="", max_length=200)), ], 
options={}, ), ]
# NOTE(review): the editor_link help_text contains "{(unknown)}" — this looks like a corrupted "{{filename}}" placeholder; verify against upstream Weblate before relying on it.
PypiClean
/GautamsX-6.0.13.tar.gz/GautamsX-6.0.13/bot/modules/watch.py
from telegram.ext import CommandHandler
from telegram import Bot, Update

from bot import Interval, DOWNLOAD_DIR, DOWNLOAD_STATUS_UPDATE_INTERVAL, dispatcher, LOGGER
from bot.helper.ext_utils.bot_utils import setInterval
from bot.helper.telegram_helper.message_utils import update_all_messages, sendMessage, sendStatusMessage
from .mirror import MirrorListener
from bot.helper.mirror_utils.download_utils.youtube_dl_download_helper import YoutubeDLHelper
from bot.helper.telegram_helper.bot_commands import BotCommands
from bot.helper.telegram_helper.filters import CustomFilters
import threading


def _watch(bot: Bot, update, isTar=False):
    # Command text looks like: "/watch <link> [quality] |[custom name]".
    text = update.message.text
    space_parts = text.split(' ')
    pipe_parts = text.split('|')

    # The link is mandatory; without it, reply with a usage message and bail out.
    try:
        link = space_parts[1]
    except IndexError:
        help_text = f"/{BotCommands.WatchCommand} [yt_dl supported link] [quality] |[CustomName] to mirror with youtube_dl.\n\n"
        help_text += "<b>Note :- Quality and custom name are optional</b>\n\nExample of quality :- audio, 144, 240, 360, 480, 720, 1080, 2160."
        help_text += "\n\nIf you want to use custom filename, plz enter it after |"
        help_text += f"\n\nExample :-\n<code>/{BotCommands.WatchCommand} https://youtu.be/ocX2FN1nguA 720 |My video bro</code>\n\n"
        help_text += "This file will be downloaded in 720p quality and it's name will be <b>My video bro</b>"
        sendMessage(help_text, bot, update)
        return

    # Resolve the requested quality; any parse failure falls back to "best".
    try:
        if "|" in text:
            quality = text.split("|")[0].split(" ")[2]
            if quality == "":
                raise IndexError
        else:
            quality = space_parts[2]
        if quality != "audio":
            quality = f'bestvideo[height<={quality}]+bestaudio/best[height<={quality}]'
    except IndexError:
        quality = "bestvideo+bestaudio/best"

    # Optional custom filename: everything after the first '|'.
    try:
        custom_name = pipe_parts[1]
    except IndexError:
        custom_name = ""

    replied = update.message.reply_to_message
    tag = replied.from_user.username if replied is not None else None

    password = ""
    listener = MirrorListener(bot, update, password, isTar, tag)
    downloader = YoutubeDLHelper(listener)
    threading.Thread(
        target=downloader.add_download,
        args=(link, f'{DOWNLOAD_DIR}{listener.uid}', quality, custom_name),
    ).start()
    sendStatusMessage(update, bot)
    # Start the periodic status updater if it is not already running.
    if len(Interval) == 0:
        Interval.append(setInterval(DOWNLOAD_STATUS_UPDATE_INTERVAL, update_all_messages))


def watchTar(update, context):
    # /tarwatch: download via youtube-dl, then tar before mirroring.
    _watch(context.bot, update, True)


def watch(update, context):
    # /watch: plain youtube-dl mirror.
    _watch(context.bot, update)


mirror_handler = CommandHandler(BotCommands.WatchCommand, watch,
                                filters=CustomFilters.authorized_chat | CustomFilters.authorized_user,
                                run_async=True)
tar_mirror_handler = CommandHandler(BotCommands.TarWatchCommand, watchTar,
                                    filters=CustomFilters.authorized_chat | CustomFilters.authorized_user,
                                    run_async=True)
dispatcher.add_handler(mirror_handler)
dispatcher.add_handler(tar_mirror_handler)
PypiClean
/Glances-3.4.0.3.tar.gz/Glances-3.4.0.3/glances/ports_list.py
from glances.compat import range
from glances.logger import logger
from glances.globals import BSD

# XXX *BSDs: Segmentation fault (core dumped)
# -- https://bitbucket.org/al45tair/netifaces/issues/15
# Also used in the glances_ip plugin
if not BSD:
    try:
        import netifaces
        netifaces_tag = True
    except ImportError:
        netifaces_tag = False
else:
    netifaces_tag = False


class GlancesPortsList(object):
    """Manage the ports list for the ports plugin."""

    _section = "ports"
    _default_refresh = 60
    _default_timeout = 3

    def __init__(self, config=None, args=None):
        # ports_list is a list of dict (JSON compliant)
        # [ {'host': 'www.google.fr', 'port': 443, 'refresh': 30, 'description': Internet, 'status': True} ... ]
        # Load the configuration file
        self._ports_list = self.load(config)

    def load(self, config):
        """Load the ports list from the configuration file.

        Returns a list of port dicts (possibly empty when no config or no
        [ports] section is available).
        """
        ports_list = []

        if config is None:
            logger.debug("No configuration file available. Cannot load ports list.")
        elif not config.has_section(self._section):
            # Lazy %-style logger arguments: only formatted when DEBUG is enabled.
            logger.debug("No [%s] section in the configuration file. Cannot load ports list.", self._section)
        else:
            logger.debug("Start reading the [%s] section in the configuration file", self._section)

            refresh = int(config.get_value(self._section, 'refresh', default=self._default_refresh))
            timeout = int(config.get_value(self._section, 'timeout', default=self._default_timeout))

            # Add default gateway on top of the ports_list lists
            default_gateway = config.get_value(self._section, 'port_default_gateway', default='False')
            if default_gateway.lower().startswith('true') and netifaces_tag:
                new_port = {}
                try:
                    new_port['host'] = netifaces.gateways()['default'][netifaces.AF_INET][0]
                except KeyError:
                    # No default IPv4 gateway found; keep the entry with host=None.
                    new_port['host'] = None
                # Port 0 means ICMP check instead of TCP check
                new_port['port'] = 0
                new_port['description'] = 'DefaultGateway'
                new_port['refresh'] = refresh
                new_port['timeout'] = timeout
                new_port['status'] = None
                new_port['rtt_warning'] = None
                new_port['indice'] = str('port_0')
                logger.debug("Add default gateway %s to the static list", new_port['host'])
                ports_list.append(new_port)

            # Read the scan list (port_1_* .. port_255_*; gaps are allowed)
            for i in range(1, 256):
                new_port = {}
                postfix = 'port_%s_' % str(i)

                # Read mandatory configuration key: host
                new_port['host'] = config.get_value(self._section, '%s%s' % (postfix, 'host'))
                if new_port['host'] is None:
                    continue

                # Read optionals configuration keys
                # Port is set to 0 by default. 0 mean ICMP check instead of TCP check
                new_port['port'] = config.get_value(self._section, '%s%s' % (postfix, 'port'), 0)
                new_port['description'] = config.get_value(
                    self._section, '%sdescription' % postfix, default="%s:%s" % (new_port['host'], new_port['port'])
                )

                # Default status
                new_port['status'] = None

                # Refresh rate in second
                new_port['refresh'] = refresh

                # Timeout in second
                new_port['timeout'] = int(config.get_value(self._section, '%stimeout' % postfix, default=timeout))

                # RTT warning
                new_port['rtt_warning'] = config.get_value(self._section, '%srtt_warning' % postfix, default=None)
                if new_port['rtt_warning'] is not None:
                    # Convert to second
                    new_port['rtt_warning'] = int(new_port['rtt_warning']) / 1000.0

                # Indice
                new_port['indice'] = 'port_' + str(i)

                # Add the server to the list
                logger.debug("Add port %s:%s to the static list", new_port['host'], new_port['port'])
                ports_list.append(new_port)

        # Ports list loaded
        logger.debug("Ports list loaded: %s", ports_list)

        return ports_list

    def get_ports_list(self):
        """Return the current server list (dict of dict)."""
        return self._ports_list

    def set_server(self, pos, key, value):
        """Set the key to the value for the pos (position in the list)."""
        self._ports_list[pos][key] = value
PypiClean
/NIA_image_2latex-1.0-py3-none-any.whl/dataset/preprocessing/third_party/katex/katex.js
* This is the main entry point for KaTeX. Here, we expose functions for * rendering expressions either to DOM nodes or to markup strings. * * We also expose the ParseError class to check if errors thrown from KaTeX are * errors in the expression, or errors in javascript handling. */ var ParseError = require("./src/ParseError"); var Settings = require("./src/Settings"); var buildTree = require("./src/buildTree"); var parseTree = require("./src/parseTree"); var utils = require("./src/utils"); /** * Parse and build an expression, and place that expression in the DOM node * given. */ var render = function(expression, baseNode, options) { utils.clearNode(baseNode); var settings = new Settings(options); var tree = parseTree(expression, settings); var node = buildTree(tree, expression, settings).toNode(); baseNode.appendChild(node); }; // KaTeX's styles don't work properly in quirks mode. Print out an error, and // disable rendering. if (typeof document !== "undefined") { if (document.compatMode !== "CSS1Compat") { typeof console !== "undefined" && console.warn( "Warning: KaTeX doesn't work in quirks mode. Make sure your " + "website has a suitable doctype."); render = function() { throw new ParseError("KaTeX doesn't work in quirks mode."); }; } } /** * Parse and build an expression, and return the markup for that. */ var renderToString = function(expression, options) { var settings = new Settings(options); var tree = parseTree(expression, settings); return buildTree(tree, expression, settings).toMarkup(); }; /** * Parse an expression and return the parse tree. */ var generateParseTree = function(expression, options) { var settings = new Settings(options); return parseTree(expression, settings); }; module.exports = { render: render, renderToString: renderToString, /** * NOTE: This method is not currently recommended for public use. * The internal tree representation is unstable and is very likely * to change. Use at your own risk. 
*/ __parse: generateParseTree, ParseError: ParseError, };
PypiClean
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/jquery-mapael/maps/world_countries.min.js
!function(a){"object"==typeof exports?module.exports=a(require("jquery"),require("jquery-mapael")):"function"==typeof define&&define.amd?define(["jquery","mapael"],a):a(jQuery,jQuery.mapael)}(function(a,b){"use strict";return a.extend(!0,b,{maps:{world_countries:{width:999.29852,height:392.03476,getCoords:function(a,b){return{x:2.775076875916*b+471.505926315,y:-2.8112860731578*a+235.89691962022}},elems:{PE:"m 246.37,248.26 c 0.32,-1.79 4.23,-4.35 2.73,-1.46 -1.45,2.09 2.59,0.39 3.11,2.75 2.72,-1.13 1.47,-5.5 4.96,-5.95 3.11,-0.83 7.69,-4.81 5.11,-7.43 2.35,-1.19 4.43,3.08 6.14,4.56 0.7,2.08 3.04,2.21 4.97,1.17 2.11,-0.15 5.75,1.18 2.69,3.69 -0.51,0.71 3.29,2.56 0.76,1.93 -3.16,0.08 -7.44,1.58 -7.92,5.32 -0.06,2.05 -3.42,3.58 -1.21,5.52 0.76,1.37 2.13,3 1.77,3.78 2.26,0.16 3.53,3.49 5.91,0.61 2.26,-1.86 -1.32,6.12 2.9,3.61 2.5,1.32 3.37,4.79 2.23,7.29 0.95,2.52 -2.79,6.04 0.3,7.72 -0.57,1.85 -2.55,3 -2.55,4.98 -3.44,2.21 -5.57,-4.41 -9.39,-4.56 -3.34,-1.31 -6.28,-3.43 -8.43,-6.26 0.32,-1.93 -1.53,-4.59 -2.66,-7.02 -2.53,-2.81 -3.15,-7.33 -5.63,-10.49 -0.47,-3 -4.42,-4.05 -5.21,-5.89 1.88,0.13 -1.01,-3.15 -0.55,-3.87 z",BF:"m 456.58,206.27 c 1.04,-2.27 -0.4,-4.54 2.65,-4.79 0.7,-1.85 0.88,-4.37 3.11,-3.1 0.83,-0.73 0.44,-1.27 1.59,-1.56 1.43,-1.81 4.2,-2.03 6.48,-3.6 3.23,-0.6 1.57,4 4.41,4.53 1.15,0.24 -1.42,0.91 0.87,2 1.62,0.34 2.62,-0.07 2.39,1.8 0.95,2.4 -3.19,1.99 -4.47,3.19 -3.06,-0.32 -7.13,-0.27 -9.66,0.43 -0.06,1.39 1.22,5.92 -0.89,2.56 -2.2,-0.12 -4.4,1.93 -5.53,-1.03 -0.26,-0.14 -0.88,0.05 -0.95,-0.42 z",FR:"m 320.44,229.09 c 3.5,-2.22 -0.96,-5.61 1.06,-8.23 1.75,-3.18 5.63,1.18 6.32,2.34 0.23,-1.32 1.46,1.48 -0.36,2.69 -1.07,2.79 -2.6,4.03 -5.24,3.39 -0.49,0.72 -1.29,0.16 -1.78,-0.18 z m -17.32,-33.96 c -1.44,-0.36 -0.63,-2.45 0.08,-0.26 z m 192.61,-78.74 c 1.65,-1.47 3.13,-2.32 2.66,0.76 -1.27,4.32 -2.55,0.43 -2.66,-0.76 z m -36.96,-15.9 c 2.7,-0.08 -1.08,-0.93 1.51,-0.72 -4.33,-0.12 3.07,-2.66 4.28,-0.7 1.46,-0.41 2.78,0.1 3.08,-0.51 -0.68,-1.77 
-1.57,-3.78 0.78,-2.26 1.39,1.11 5.71,0.69 4,-0.37 2.05,-0.92 4.59,-0.73 4.1,-3.44 2.64,-1.5 4.35,1.69 6.91,1.76 -0.28,2.27 2.31,-0.77 2.15,1.29 2.43,0.75 4.64,1.76 7.05,1.89 3.81,0.08 -0.46,2.1 0.15,4.48 -2.03,-0.09 -2.17,1.61 -4.01,3.03 -0.88,1.88 2.46,-1.44 2.47,1.52 -0.67,0.65 1.58,2.16 -0.98,2.37 1.7,0.78 0.11,3.19 2.93,2.66 -1.77,2.7 -4.67,3.56 -7.34,2.1 0.36,-0.21 -3.5,-0.83 -5.33,0.71 0.58,2.4 -1.63,2.53 -3.83,1.72 -1.61,-1.41 -4.18,-10e-4 -6.48,-0.95 -2.3,-0.72 -3.81,-0.89 -2.1,-3.18 0.98,-2.31 -0.1,-1.94 0.71,-4.33 1.35,0.73 2.04,2.86 0.92,0.27 -2.12,-1.23 -0.46,-0.44 -0.93,-2.5 -1.83,0.71 -4.34,-3.53 -1.71,-2.49 -2.59,-0.07 -1.33,-0.92 -3.27,-0.96 1.23,-0.3 -1.33,0.1 -0.81,-0.41 -0.69,-0.31 -3.16,-0.22 -4.24,-0.98 z",LY:"m 497.92,151.14 c 1.22,-1.02 3.3,-2.31 2.26,-4.45 1.64,-1.36 4.45,-1.74 3.66,-4.37 2.93,1.82 6.41,0.15 9.36,2.05 2,0.86 2.23,4.49 5.53,3.54 3.2,0.07 6.84,5.03 9.01,0.5 -2.33,-4.25 4.21,-6.37 7.31,-4.84 1.14,2.42 5.27,1.09 6.57,3.1 -1.75,2.8 -0.4,6.49 -0.36,9.96 -0.07,7.58 0.05,15.16 -0.06,22.74 -1.18,0.21 -3.56,-0.76 -2.74,1.4 -7.23,-3.84 -14.52,-7.62 -22,-10.94 -2.87,0.55 -5.22,3.4 -7.74,0.43 -3.87,0.51 -4.52,-4.36 -8.24,-3.67 -0.09,-2.15 -4.24,-4.5 -1.19,-6.01 -0.81,-3.08 1.09,-6.77 -1.38,-9.44 z",BY:"m 536.15,88.53 c 2.51,-0.84 2.42,-2.61 0.94,-4.65 2.05,0.38 5.52,-0.77 6.14,-1 -1.65,-1.71 4.41,-2.24 2.21,-3.4 2.47,-1.46 5.19,-2 8.01,-1.21 0.87,0.9 5.84,-0.04 4.08,3.31 1.82,2.07 3.09,2.51 5.12,3.65 -0.7,1.84 -5.16,-0.46 -3.09,2.59 1.51,1.91 -4.05,1.08 -2.99,3.57 -2.54,-1.13 -4.21,-0.29 -6.38,-0.94 -2.85,0.32 -5.9,-1.52 -9.1,-0.89 -1.6,-0.22 -4.73,2.54 -3.66,-0.47 -0.38,-0.29 -0.92,-0.21 -1.27,-0.56 z",PK:"m 640.67,151.64 c 3.95,1.79 8.26,1.07 12.34,0.72 4.22,1.01 1.66,-5.15 5.25,-4.55 2.06,0.2 0.74,-1.54 3.23,-1.54 2.86,1.63 2.24,-2.57 3.79,-3.75 3.59,0.37 -1.34,-3.47 1.89,-2.43 2.95,0.23 1.1,-2.43 3.35,-3.6 -0.01,-1.31 -2.18,-3.16 0.77,-3.47 2.85,-1.65 6.81,-1.33 9.59,-1.23 2.13,0.39 1.58,3.56 3.46,3.2 1.26,1.55 5.23,0.15 1.53,1.71 
-1.9,2.5 -5.73,1.36 -8.5,1.33 -1.73,1.51 1.24,1.92 0.04,3.16 -1.34,2.56 5.7,3.16 2.32,4.38 -1.97,1.16 0.04,3.18 -2.52,4.09 -1.14,1.82 -3.07,3.92 -4.92,5.76 -1.17,3.02 -4.19,1.45 -5.74,1.86 -1.69,1.44 -2.62,3.46 0.03,4.04 -0.74,2.43 3,2.59 2.19,5.35 -0.7,0.83 -4.08,0.91 -6.22,0.54 -1.11,2.01 -2.29,1.6 -3.54,0.89 -0.58,-0.52 -0.41,-2.6 -2.23,-2.62 0.82,-1.92 -2.84,-2.17 -0.57,-1.34 -3.12,0.1 -5.6,0.7 -7.75,0.13 -1.6,0.26 -4.51,1.16 -5.62,0.13 -0.69,-4.03 4.36,-2.41 4.62,-5.27 -2.66,0.34 -0.14,-4.03 -3.41,-3.72 -1.62,-0.75 -1.86,-2.85 -3.39,-3.79 z",ID:"m 844.1,252.59 c 0.28,1.08 0.13,-2.98 1.43,-0.99 0.85,2.54 -1.43,2.01 -1.43,0.99 z m -70.2,-19.41 c 0.55,-1.5 1.03,-0.93 0.47,-1.62 1.7,-4.63 2.41,3.7 5.92,1.12 2.7,0.67 3.97,-2.97 6.65,-0.62 2.54,-0.19 3.17,-1.43 4.17,-3.33 0.53,-1.38 1.48,-3.37 2.12,-5.45 2.11,0.1 5.07,0.12 4.94,1.43 1.01,1.31 -2.48,-0.15 -0.69,1.65 -0.18,0.67 2.72,2.27 1.24,3.68 1.07,1.24 5.35,3.79 1.08,3.13 -1.78,-1.04 -2.61,3.94 -1.82,4.68 -0.99,0.36 -2.22,1.17 -2.39,1.26 -1.79,1.91 -0.28,1.88 -0.58,3.73 -1.09,0.57 -0.82,3.52 -4.02,4.03 -2.06,1.1 -1.24,-3.42 -2.78,-1.91 -1.62,1.17 -2.41,-2.34 -3.81,0.19 -1.82,-0.08 -2.62,0.99 -2.68,-1.63 -2.12,1.16 -2.49,0.45 -4.17,0.2 -0.82,-2.04 0.27,-5.83 -2.53,-5.61 1.04,-0.68 -1.46,-1.32 -0.21,-2.52 -0.6,-0.57 -1.28,-1.56 -0.93,-2.41 z m -3.25,9.64 c -1.93,3.7 4.6,0.57 0,0 z m -6.54,-2.36 c -1.17,1.33 2.05,1.13 2.02,3.17 2.09,2.06 2.52,-1.43 0.47,-1.37 -0.4,-3.22 -1.04,-2.02 -2.49,-1.8 z m -6.41,-7.05 c -2.5,-2.43 -1.84,1.04 0,0 z m -14.87,0.47 c 2.59,-1.55 -4.34,-4.6 0,0 z m -0.58,-13.04 c -2.16,1.1 -4.7,-1.74 -6.38,-0.52 0.8,3.59 5,4.51 6.74,7.4 0.7,2.45 4.36,2.68 4.04,6.05 1.16,2.15 3.68,3.94 4.67,6.59 1.11,3.63 4.3,6.14 7.08,8.63 1.57,0.47 4.02,4.81 3.73,2.14 1.72,1.03 1.63,-0.27 3.17,0.77 0.5,-2.69 -0.31,-5.3 0.77,-7.65 -0.83,-1.61 -3.03,-3.22 -4.09,-1.06 1.48,-1.31 -0.14,-3.14 -1.22,-4.66 -2.88,-0.05 -1.81,-2.35 -2,-2.39 2.65,-1.16 -1.22,-2.63 -2.39,-1.04 -0.85,0.26 3.05,-1.4 0.22,-1.54 -1.79,-1.19 
-3.08,-3.65 -5.05,-4.24 0.97,2.75 -2.27,-2.24 -2.25,-1.11 -0.91,-2.81 -5,-3.14 -5.7,-6.04 -0.29,-0.58 -0.86,-0.92 -1.34,-1.33 z m 94.76,34.81 c -2.37,1.03 -0.94,4.18 0.01,0.75 -0.25,-0.4 0.73,-0.48 -0.01,-0.75 z m -16.26,2.53 c 1.92,0.08 3.88,-1.52 0.8,-0.88 -0.51,-0.19 -0.78,0.52 -0.8,0.88 z m -3.62,0.33 c -1.7,1.62 3.67,0.44 0.84,0.12 l -0.42,-0.08 z m -2.98,0.4 c -1.36,2.21 2.94,-1.13 0.38,0.28 0.21,-0.23 -0.21,-0.46 -0.38,-0.28 z m -9.47,1.52 c 2.72,0.11 5.82,0.66 7.79,-1.08 1.07,-2.18 -0.65,1.4 -2.62,0.11 -1.61,0.69 -5.33,-2.08 -5.54,0.96 l 0.15,0.13 z m -2.95,2.04 c 1.18,0.19 5.35,3.62 4.82,0.86 -1.41,-1.2 -3.09,-1.98 -4.82,-0.86 z m -2.85,-1.73 c 1.18,-1.38 2.5,0.46 2.65,-0.56 2.04,-0.15 -0.51,-1.28 -0.71,-1.21 -0.94,0 -3.71,-1.15 -1.29,0.64 0.46,1.44 -4.5,-1.9 -3.7,1.37 0.95,0.64 2.05,-0.35 3.06,-0.24 z m -4.33,-1.99 c -1.7,1.09 -1.19,2.65 0.49,1.64 0.52,-0.83 0.94,-1.71 -0.49,-1.64 z m -5.16,-0.35 c 1.23,1.07 1.75,2.98 3.21,1.1 0.5,-2.05 -2.43,-0.3 -3.21,-1.1 z m -4.26,-3.37 c -2.23,2.03 6.23,-0.05 1.48,-0.02 -0.49,-0.01 -0.99,-0.1 -1.48,0.02 z m -17.24,-2.34 c -2.16,-1.59 -3.86,2.62 -3.51,2.14 3.4,-0.5 2.28,2.56 5.73,2.02 3.28,0.83 6.78,0.67 10.2,1.96 3.01,0.08 7.42,0.77 9.28,1.2 -0.36,-1.74 -0.25,-3.59 -2.97,-2.58 -2.68,-0.14 -1.7,-3.15 -4.88,-2.73 -1.48,-0.51 -2.83,-1.36 -3.81,0.5 -3.04,0.39 -4.71,-1.18 -6.93,-2.09 -1.3,-0.08 -1.96,-1.08 -3.1,-0.43 z m 46.68,11.07 c -0.68,2.23 3.93,-0.11 4.33,-1.86 0.55,-2.56 -2.33,0.27 -2.46,0.44 -1.17,-0.63 -1.28,0.52 -1.88,1.42 z m 19.83,-26.89 c 0.96,-0.2 -1.01,-1.41 0.79,-0.29 2.41,-1.1 -4.14,-0.71 -1.17,0.04 l 0.24,-0.12 z m -1.92,4.45 c 2.66,0.34 -0.48,-2.04 -0.41,-0.14 z m -0.02,4.1 c 2.13,1.84 3.14,0.23 1.08,-1.3 -2.01,-0.19 -6.73,-1.71 -6.8,1.46 0.13,-2.06 2.17,0.07 2.86,-0.49 1.1,0.73 2.05,-0.7 2.86,0.33 z m -6.51,-9.78 c -0.04,1.56 3.12,3.66 1.01,0.99 -2.27,-3.46 2.38,-1.26 1.92,-2.11 -3.14,-0.41 2.08,-3.57 -0.99,-3.05 -0.12,0.95 -3.06,2.86 -1.18,0.71 0.36,-1.5 0.16,-4.13 -1.02,-1.09 -0.89,1.52 0.52,2.99 
0.25,4.55 z m -0.53,4.05 c -0.8,1.66 3.89,0.54 0.65,-0.16 h -0.31 z m 0.13,-3.16 c -1.86,0.54 2.01,2.75 0.39,0.39 l -0.11,-0.15 z m -2.29,9.9 c 4.74,-0.87 -3.2,-3.92 -1.39,-0.73 0.41,0.34 0.92,0.5 1.39,0.73 z m -6.26,-6.11 c -0.8,2.07 4.64,-0.11 1.11,0.16 l -0.62,-0.15 z m -3.27,-0.05 c 1.79,-0.81 -1.37,-2.18 -1.02,-0.14 1.05,-1.09 0.3,-0.5 1.02,0.14 z m -0.52,7.77 c -1.92,1.21 -1.02,5.26 0.11,2.25 -1.83,-0.61 1.19,-1.26 -0.11,-2.25 z m -1.48,2.9 c 1.73,-3.14 -1.78,-1.56 -0.22,-0.2 l 0.06,-0.18 z m -8.79,-0.75 c -0.46,2.31 4.29,1.43 2.53,-0.51 1.29,-2.09 -1.63,-7.27 2.21,-6.28 -0.53,1.72 -0.32,3.05 1.43,3.94 -1.14,1.89 1.48,2.61 1.8,0.87 2.98,0 1.1,-1.2 0.14,-2.61 1.21,-1.12 -1.82,-3.54 -2.58,-4.95 2.21,0.86 4.01,-3.08 5.78,-2.6 -0.09,-1.6 -2.38,-0.18 -1.96,-0.29 -2.28,0.21 -3.49,0.41 -5.06,1.85 -3,-0.9 -3.37,-6.82 0.98,-5.19 3.28,-1.16 7.69,2.35 9.98,-1.5 2.41,-2.9 -0.75,-1.66 -1.84,0.03 -3.07,0.67 -6.23,-0.21 -9.11,-1.01 -1.06,1.59 -3.12,1.29 -3.23,3.95 -0.71,-0.41 1.08,2.83 -0.42,2.18 -0.21,2.8 -2.89,4.96 -1.82,7.66 2.5,-1.68 2.02,3.47 1.17,4.47 z m -63.9,-21.23 c 1.46,-0.3 -3.53,-2.47 -0.65,-0.5 l 0.38,0.19 z m 96.92,9.51 c 1.18,-1.98 4.71,-1.91 6.93,-0.68 2.5,0.02 0.1,5.72 2,5.35 0.45,-1.2 1.5,4.18 3.35,1.35 1.75,-2.47 3.53,-3.07 5.68,-4.58 2.94,0.89 5.63,2.49 8.67,3.03 0.6,2.99 0.4,7.71 -0.01,11.23 0.48,1.8 0.85,6.39 -0.18,6.81 -1.4,-2.86 -3.85,-2.48 -5.38,-2.53 1.09,-1.45 -0.76,-2.94 0.76,-2.52 -1.89,-0.05 -1.63,-1.63 0.12,-0.65 -2.37,-0.6 -2.13,-3.71 -3.2,-4 -1.28,-1.43 -5.02,-2.77 -7.57,-3.05 -2.1,-0.01 -1.41,-1.84 -1.44,-1.56 -1.47,0.45 -3.78,-0.48 -2.6,-2.4 -0.46,-0.51 -1.7,5.09 -3.09,1.82 1.88,-1.26 -4.51,-3.14 -0.21,-2.72 2.01,-1.5 2.99,0.56 3.52,-1.18 0.19,-1.16 -4.95,0.93 -5.48,-1.23 0.65,-1.76 -2.77,-0.49 -2.4,-1.85 z m 17.69,20.64 c 1.89,1.91 6.05,-3.52 1.68,-2.47 -1.02,0.41 -1.3,1.56 -1.68,2.47 z m -5.92,-18.96 c 0.85,1.34 6.01,0.49 1.74,0.18 -0.58,-0.06 -1.16,-0.13 -1.74,-0.18 z",YE:"m 619.35,188.7 c -1.65,-1.63 -1.51,-5.17 -3.76,-6.5 -3.5,0.77 
-7.67,0.07 -10.51,2.66 -1.04,1.42 -2.24,4.24 -3.93,2.02 -3.11,0.26 -7.42,-1.26 -9.6,0.49 -0.06,1.62 -1.53,3.91 -1.31,5.28 1.62,1.29 0.77,6.66 3.08,7.25 3.29,1.04 4.59,-2.58 7.98,-2.06 4,-1.07 6.96,-3.4 10.98,-4.53 2.43,-0.43 4.63,-1.26 4.89,-3.69 0.67,-0.45 1.49,-0.49 2.17,-0.91 z m 1.62,12.3 c 6.67,-0.85 -4.22,-1.91 0,0 z",MG:"m 591.79,297.71 c 1.2,2.82 0.97,7.37 2.95,8.73 2.79,1.88 7.92,0.75 8.65,-2.85 1.29,-3.83 2.44,-7.72 3.73,-11.54 1.42,-3.12 1.53,-7.02 3.01,-9.95 -0.6,-1.37 -0.73,-4.62 0.82,-1.66 1.9,-2.04 -0.38,-5.85 -0.85,-8.49 -0.87,-0.29 -1.43,-3.51 -1.9,-1.92 -1.27,0.91 -0.45,3.18 -2.27,4.06 -0.19,0.19 -1.97,-1.07 -0.92,0.84 -0.11,0.93 -1.26,1.18 -0.04,2.22 -0.91,-0.83 -1.7,1.65 -1.62,-0.1 -1.26,1.44 -0.18,2.8 -1.67,1.53 -1.92,1.31 -0.6,1.96 -2.14,1.61 -1.5,0.4 -3.11,0.31 -4.4,1.58 -1.71,2.75 -1.33,5.74 0.02,8.72 0.17,2.82 -2.6,4.67 -3.36,7.2 z",BO:"m 278.97,266.42 c 2.55,0.92 4,-0.24 6.07,-1.58 1.83,-1.59 7.41,-3.49 5.45,0.89 -0.13,2.3 1.75,4.96 4.28,5.04 2.75,0.15 4.61,3.2 7.62,2.87 3.23,0.65 1.54,4.44 2.41,5.56 -0.88,4.62 6.82,0.07 5.04,4.31 2.5,1.45 2.99,4.61 1.14,7.11 1.18,3.96 -2.48,-2.62 -5.16,-0.73 -4.44,-0.31 -7.07,2.7 -7.15,6.88 -0.06,2.22 -5.32,-1.01 -5.13,3.18 -0.31,-2.5 -4.81,-2.91 -5.54,-2.31 -1.52,1.17 -4.75,4.3 -4.51,0.3 -0.15,-2.22 -2.81,-4.42 -1.84,-6.01 0.49,-2.31 -1.73,-4.83 -2.44,-7.31 -1.14,-1.06 3.63,-3.14 0.56,-3.99 -0.32,-3.06 1.21,-5.67 1.05,-9.02 1.58,-1.37 -1.62,-3.65 -1.83,-5.21 z",CI:"m 448.04,217.31 c 1.43,-1.6 0.22,-2.8 1.35,-3.91 0.59,-0.67 -1.34,-2.42 1.31,-1.34 -0.95,-1.89 -0.32,-2.29 -1.43,-4.1 0.82,-2.97 3.97,0.01 4.46,-2.12 1.36,-0.64 0.77,1.88 2.83,0.49 1.82,1.17 2.93,3.02 5.18,1.42 2.95,-0.26 4.17,4.14 1.97,6.31 -2.43,2.51 1.76,6.79 -0.06,7.18 -0.54,-0.78 -3.11,0.31 -2.31,-0.69 -0.76,0.31 -4.38,0.61 -1,0.18 1.23,0.64 -5.22,0.11 -2.31,0.45 -3.1,-0.38 -9.05,4.8 -6.65,-1.41 -0.79,-1.62 -2.12,-1.85 -3.34,-2.47 z",DZ:"m 447.83,158.87 c -1.42,-4.06 2.69,-6.52 5.93,-6.61 2.69,-0.8 5.76,-2.94 8.06,-4.1 -1.83,-1.76 
2.54,-2.76 4.09,-2.99 3.3,1.15 3.26,-1.18 1.37,-3.08 0.44,-1.92 -0.86,-4.75 -0.94,-5.19 2.13,-1.53 4.57,-1.62 6.46,-3.05 3.12,-1.42 6.64,-1.21 9.8,-1.92 2.76,0.4 5.39,0.34 7.65,-0.34 1.96,-0.05 4.13,0.03 5.43,0.53 -2.12,1.45 1,6.38 -2.98,7.8 0.47,3.54 5.03,4.58 5.13,8.4 0.23,3.1 1.52,5.64 1.57,9.07 -0.83,1.67 0.58,3.63 -1.37,4.66 1.84,2.02 1.64,5.46 5.08,5.02 4.96,2.86 -2.46,4.34 -4.43,6.15 -3.83,2.19 -7.47,4.71 -10.7,7.7 -2.4,-0.07 -8.29,3.34 -7.19,-1.03 -2.48,-1.01 -5.22,-1.52 -6.15,-4 -6.84,-4.13 -13.22,-9.02 -20.11,-13.09 -2.21,-1.35 -4.46,-2.64 -6.71,-3.93 z",CH:"m 488.45,105.83 c 1.29,-2.14 2.66,-4.1 4.76,-4 2.28,0.2 3.51,-1.55 5.32,0.74 -0.98,1.45 4.27,0.74 1.85,1.94 -0.53,1.03 -1.24,0.84 -2.77,0.41 -0.61,3.86 -2.74,-1.95 -3.77,1.37 -3.11,1.46 -2.84,-3 -5.4,-0.46 z",CM:"m 495.72,222.09 c 0.47,-3.22 3.41,-5.46 5.57,-6.53 1.27,2.08 2.15,2.36 3.29,0.26 0.18,-2.57 2.73,-4.33 2.93,-6.51 2.19,-1.52 1.75,-5.62 4.98,-6.24 1.08,-1.83 -3.37,-3.4 -0.72,-4.23 3.2,2.1 0.52,6.67 3.61,8.69 -2.26,-0.37 -6.76,0.32 -3.01,2.82 3.73,1.91 2.38,5.81 -0.14,7.91 -0.42,2.53 1.22,5.29 2.32,7.46 2.54,0.81 3.26,6.86 -0.01,4.31 -3.04,-0.52 -5.92,-0.7 -9.02,-0.83 -1.81,-0.39 -6.9,2.06 -6.2,-1.63 -0.19,-1.82 -0.76,-2.84 -0.86,-3.33 -1,1.33 -2.12,-2.49 -2.94,-1.55 z",MK:"m 528.66,118.73 c 0.72,-2.03 2.99,-1.79 4.92,-2.22 2.81,0.77 2.27,4.33 -0.81,3.59 -1.58,0.85 -4.12,1.04 -4.1,-1.37 z",BW:"m 527.36,305.27 c 0.81,-2.27 -1.87,-8.12 1.68,-7.74 2.46,-1.12 0.37,-6.2 1.08,-8.87 -0.38,-2.72 4.82,-1.97 6.6,-2.11 1.16,2.25 5.11,-3.32 6.09,0.9 1.25,4.17 6.02,4.42 6.41,8.27 1.43,1.11 5.97,1.59 2.48,3.25 -3.21,0.74 -4.64,4.07 -6.95,6.05 -2.13,0.14 -1.28,4.09 -4.34,3.16 -2.45,-0.7 -5.26,-2.69 -6.1,1.11 -1.71,2.37 -6.82,3.13 -4.88,-1.17 -0.38,-1.17 -1.04,-2.17 -2.09,-2.86 z",UA:"m 533.33,99.45 c 0.38,-2.42 2.62,-0.95 1.55,-3.19 1.51,-2.24 5.9,-2.87 2.56,-5.17 1.24,-1.88 5.76,-1.8 8.43,-1.21 2.08,0.47 4.68,1.02 6.24,0.62 1.37,0.15 3.91,1.05 4.74,0.38 0.25,-3 4.8,-1.41 6.48,-2.35 2.03,-0.95 4.88,1.12 
3.49,2.26 0.42,1.56 4.07,0.15 3.62,3.02 2.25,0.9 4.97,-0.87 6.7,1.19 1.53,0.31 6.03,0.27 5.9,2.18 -1.87,0.91 0.64,0.9 -1.06,1.74 3.11,2.67 -4.06,1.43 -3.8,3.68 -2.7,1.82 -6.53,0.96 -9.21,3.43 2.58,-2.32 -2.41,0.17 0.41,1.46 1.07,1.21 5.82,-0.36 2.73,1.55 -2.21,-1.2 -6.6,3.05 -7.27,0.97 1.25,-2.46 -5.49,-1.56 -0.98,-2.96 4.95,-1.59 -4.78,-0.76 -3.46,-2.07 -3.23,-0.26 4.67,-0.42 0.65,-0.42 -1.37,-1.95 0.32,-0.22 -1.65,-0.07 -0.22,-1.27 -3.13,2.33 -3.62,0.72 0.24,0.25 0.69,1.87 -1.3,2.11 1.92,-1.05 -1.71,-0.2 -0.1,0.98 -0.61,-0.67 -6.41,0.78 -2.76,-1.71 0.93,-2.27 1.5,-1.55 3.85,-1.53 -0.18,-1.02 -2.58,-1.29 -1.6,-2.59 -0.99,-1.59 -2.48,-2.46 -4.37,-2.61 -2.58,-1.71 -4.68,1.24 -7.34,0.98 -2.41,0.31 -5.68,-0.1 -7.92,-0.87 -0.4,-0.01 -0.51,-0.49 -0.9,-0.53 z",KE:"m 565.98,238.45 c -1.07,-3.74 2.54,-5.6 3.18,-8.69 -1.15,-1.8 -1.5,-4.12 -2.68,-5.32 0.29,-3.5 6.28,-1.26 8.65,-0.77 2.74,2.44 6.51,2.92 8.94,0.29 1.02,-0.16 4,0.51 3.69,0.99 -3.5,3.08 -1.82,7.92 -2.24,12.02 0.81,1.63 2.52,4.78 -0.33,4.2 0.79,1.52 -2.38,1.75 -2.35,3.92 -0.94,2.23 -1.95,5.44 -4.46,2.07 -2.59,-1.71 -2.38,-4.35 -6.03,-5.25 -2.2,-0.99 -4.16,-2.68 -6.39,-3.46 z",TW:"m 805.03,169.47 c -0.48,1.96 2.33,6.52 2.74,2.31 0.92,-1.65 3.91,-7.83 0.29,-6.84 -1.39,1.01 -2.44,2.92 -3.04,4.53 z",JO:"m 568.85,153.04 c 0.91,-3.06 1.42,-6.72 2.23,-9.42 2.89,2.96 6.55,-2.17 8.62,-1.24 2.67,3.53 -1.78,4.19 -4.98,4.69 0.47,0.91 3.25,2.52 2.05,2.92 -1.97,1.51 -4.82,4.48 -7.93,3.06 z",MX:"m 146.91,144.12 c 4.03,-0.61 6.19,-0.66 9.33,0.98 3.78,0.9 7.36,3.02 11.36,2.39 1.8,-0.09 4,0.57 4.58,-1.3 3.87,-0.71 7.78,1.72 9.43,5.05 1.12,2.54 5,3.88 6,0.6 4.48,-1.37 5.43,4.11 8.08,6.22 0.1,3.47 3.3,3.99 5.99,4.78 1.66,0.21 -1.86,4.93 -0.82,6.79 -1.12,2.04 1,5.79 0.91,5.71 -2.07,-3.94 0.48,2.62 1.93,3.59 1.77,1.76 2.02,5.25 2.55,3.87 2.27,0.76 5.91,2.4 5.97,1.14 2.66,-0.29 4.19,-1.31 5.88,-0.17 1.6,-1.88 -1.16,-0.68 1.42,-1.96 2.46,-2.61 0.26,-6.77 5.15,-6.31 3.48,-0.81 3.86,-0.36 6.33,-0.19 0.26,2.44 -2.58,4.46 -2.14,5.95 
0.62,0.64 -0.9,4.29 -1.51,1.72 -1.52,1.82 -2.49,2.76 -4.99,2.52 -1.62,-0.04 -3.27,-0.35 -3.18,1.62 -2.43,-0.32 5.21,3.74 0.25,3.3 -3.93,-1.31 -2.58,6.77 -4.69,2.76 -0.69,-1.06 -6.52,-4.51 -3.45,-2.55 -3.28,-1.38 -1.63,-0.96 -5.09,0.37 -3.83,1.6 -7.15,-1.96 -10.81,-2.44 -3.76,-1.96 -7.39,-3.38 -11.04,-5.17 -1.44,-1.39 -7.37,-3.65 -4.34,-5.73 -1.08,-0.83 1.2,-2.68 -1.02,-3.9 -0.19,-2.82 -5.05,-6.01 -5.47,-6.73 -0.63,-1.05 -3.04,-3.19 -3.99,-3.34 -1.06,0.21 -1.42,-1.5 -0.43,-2.24 -0.91,-1.2 -4.58,-1.95 -3.82,-4.15 -2.28,0.08 -4.27,-2.95 -5.37,-4.94 -1.41,-2.21 -1.42,-5.35 -4.29,-5.46 -1.79,0.17 -3.23,-2.4 -2.32,0.28 -0.23,3.69 3.17,5.75 4.96,8.27 0.78,1.45 3.2,4.8 3.49,5.02 0.49,-0.56 2.41,4.15 3.06,5.6 0.06,2.72 2.23,0.16 2.89,3 2.19,1.7 -2.08,2.99 -1.92,0.26 -2.75,-2.57 -6,-2.84 -5.08,-6.62 -1.52,-0.71 -2.61,-3.46 -3.45,-2.34 -1.97,-0.2 -6.6,-4.19 -2.34,-2.76 1.72,1.22 0.86,-0.5 0.25,-0.24 1.51,-2.83 -4.23,-4.37 -4.82,-7.38 -1.2,-1.22 -1.91,-4 -3.41,-5.63 z",AE:"m 614.9,167.34 c 1.12,1.33 4.68,0.15 7.22,0.3 0.84,-0.73 3.89,-4.15 5.57,-5.23 0.32,1.15 1.11,4.3 -0.84,3.09 -0.43,1.59 0.57,2.62 -0.88,3.06 -0.36,3.02 -1.96,4.08 -4.92,2.93 -3.16,0.32 -4.78,-1.53 -6.14,-4.15 z",BZ:"m 224.45,190.94 c 0.24,-3.09 -0.56,-6.25 2.53,-7.3 0.29,1.75 0.85,5.64 -1.6,6.97 l -0.06,0.33 z",BR:"m 266.7,256.8 c 1.46,-2.11 2.75,-3.84 3.14,-6.74 2.29,-1.46 5.6,-3.35 7.84,-2.31 0.57,-3.26 2.17,-6.93 1.28,-10.19 -1.94,-1.19 -1.76,-4.64 1.04,-3.76 0.18,-1.73 -3.3,-0.97 -1.54,-3.06 3.03,0.03 4.52,0.14 6.33,-1.04 1,1.65 1.4,3.9 4.06,3.71 1.53,-1.52 1.23,1.67 2.51,-0.87 2.04,-0.84 3.65,-2.08 4.62,-3.66 -2.79,0.11 -1.74,-4.02 -3.65,-5.3 1.71,0.49 4.48,1.94 5.53,1.46 0.52,-2 6.46,-0.98 5.62,-4.04 2.95,-1.06 1.23,2.16 2.82,2.76 -0.07,2.41 -1.85,6.71 1.73,8.09 1.93,0.81 3.93,-2.13 6.16,-1.54 2.09,0.09 2.63,0.19 2.43,-1.56 2.53,-1.05 4.4,1.51 6.59,0.45 3.41,1.99 3.65,-5.35 6.01,-4.79 -0.54,-2.73 1.11,2 1,0.3 0.25,3.01 0.78,5.59 3.32,6.7 -0.09,1.92 -4.17,4.12 -5.06,7.04 -0.34,1.56 -4.33,1.35 -1.45,1.78 
1.67,0.03 4.62,-3.81 4.25,0.41 1.46,1.61 4.74,-1.62 3.38,1.58 -0.5,0.81 2.36,-3.23 2.79,-2.35 1.43,-0.16 -0.83,0.06 0.54,-1.23 1.18,-1.57 2.7,-1.1 3.66,-0.82 0.73,0.05 1.03,0.82 2.11,0.83 1.07,0.67 2.48,0.63 2.32,1.38 0.78,-0.13 1.62,-0.15 1.87,0.67 1.16,0.13 -1.26,2.24 0.79,1.18 -0.69,1.26 -1.58,4.64 0.01,1.46 1.86,-2.63 -0.55,1.94 1.3,-0.46 1.59,-1.38 4.26,0.93 5.97,0.64 2.74,0.42 6.44,-0.33 8.95,2.53 2.13,3.18 5.69,3.83 8.9,4.58 1.09,3.16 1.62,4.91 1.4,7.38 -0.38,3.15 -2.76,4.44 -3.38,6.11 -2.21,1.74 -3.17,2.31 -3.54,3.19 -0.25,0.79 -2.34,6.81 -3.79,3.74 -0.42,1.14 -1.31,3 -0.85,3.69 -0.01,1.34 0.72,6.53 -0.56,9.43 -0.34,2.2 -1.91,4.24 -1.69,6.7 -1.24,2.13 -3.31,3.79 -3.37,6.7 -2.84,0.59 -2.01,3.47 -5.06,2.71 -1.22,-1.53 -0.6,0.65 -3.13,0.33 2.75,-0.82 -3.29,-0.52 -1.79,0.67 -1.69,0.79 -3.67,1.71 -4.9,1.86 -3.43,1.46 -4.25,4.41 -6.41,3.93 1.76,0.96 -0.49,1.07 0.3,2.05 -0.9,0.64 0.67,3.24 0.07,4.84 -0.26,2.68 -4.11,4.03 -5.09,7.86 -0.66,2.16 -7.08,5.51 -3.13,2.81 1.27,-0.99 4.21,-4.65 1.47,-3.73 -1.31,-1.93 -0.07,1.79 -0.93,1.26 -1.83,1.86 -2.32,3.24 -2.93,5.11 0.04,2.12 -4.51,4.63 -2.66,1.15 0.95,-2.58 -4.38,-3.84 -5.93,-5.87 -1.77,1.23 -2.88,-3.55 -5.59,-1.85 1.22,-1.65 4.21,-4.54 5.22,-5.85 2.71,-1.71 7.57,-3.48 4.51,-7.29 -3.94,1.35 1.69,-5.48 -2.17,-4.73 -3,1.6 -1.94,-7 -4.92,-4.41 -2.28,0.5 -5.22,-0.81 -3.49,-3.69 -0.95,-1.92 -0.41,-2.95 -0.14,-4.92 1.86,-2.56 0.16,-4.82 -1.6,-6.55 1.79,-3.82 -6.05,0.49 -4.98,-4.03 -0.75,-1.12 0.85,-5.38 -2.7,-5.56 -2.5,0.22 -4.23,-2.13 -6.37,-2.71 -2.28,0.01 -5.18,-1.62 -4.99,-4.14 -0.44,-2.07 0.99,-4.92 -2.61,-3.53 -2.64,0.91 -4.45,2.73 -6.87,3.62 -2.12,-1.36 -6.87,1.41 -5.16,-3.45 1.08,-3.21 -4.45,3.31 -4.83,-1.02 -1.8,-0.36 -2.28,-0.1 -2.04,-1.84 -1.24,-1.17 -1.54,-2.65 -2.51,-3.78 z m 64.34,-17.04 c 1.69,1.81 4.29,0.77 5.82,-0.97 2.76,-3.8 -3.6,-2.64 -5.41,-2.38 0.34,1.32 -0.93,1.2 0.01,2.25 1.2,0.1 -1.24,-0.03 -0.42,1.1 z",SL:"m 435.02,210.22 c 1.93,-1.56 5.36,-4.95 7.22,-0.74 1.09,1.13 -0.89,3.59 1.16,2.39 -1.16,2.19 
-3.35,5.68 -6.08,2.91 1.45,-0.71 -1.26,-0.53 -1.72,-2.37 -1.36,-0.45 1.89,-1.32 -0.29,-0.89 -0.48,-0.9 1.07,-0.74 -0.28,-1.31 z",ML:"m 437.94,194.09 c 2.04,0.27 0.91,-4.28 3.33,-1.43 0.66,-0.14 2.73,-0.47 4.23,-0.55 2.58,-0.13 7.07,-0.19 10.4,-0.03 2.61,-1.65 -0.96,-6 -0.22,-8.98 -0.61,-5.94 -1.17,-11.89 -2.04,-17.8 2.18,0.22 4.68,-0.68 6.45,0.99 5,3.35 10.11,6.64 14.98,10.12 0.79,2.34 3.83,2.32 5.75,3.54 -0.87,2.43 0.94,2.51 2.85,1.83 -0.17,3.51 0.95,7.94 -1.95,10.48 -3.03,-0.07 -6.2,0.86 -9.14,1.22 -2.98,-1.16 -5.79,2.3 -8.35,2.58 -0.66,1.62 -1.51,1.07 -1.88,2.53 -3.32,-2.45 -1.5,3.09 -4.45,3.36 -1.6,0.57 0.02,4.22 -2.58,4.94 -1.29,0.07 -0.15,-2.12 -1.6,-1.03 -0.4,1.08 -1.89,1.12 -3.22,0.55 -1.36,2.1 -1.08,-2.87 -2.67,-1.59 1.46,-1.31 -0.82,-2.75 -1.47,-4.3 -0.46,1.21 -3.4,1.35 -4.37,1.04 -1.52,0.18 -2.09,0.26 -1.85,-1.7 -0.05,-2.03 -2.16,-2.23 -1.89,-4.57 -0.51,-0.29 0.13,-0.87 -0.33,-1.19 z",CD:"m 505.77,251.86 c 1.11,-0.45 1.26,-4.86 3.32,-2.51 1.04,-1.47 3.32,-2.13 3.01,-0.08 3.18,-1.06 5.29,-4.26 5.08,-7.65 2.52,-2.81 4.78,-5.42 4.56,-9.55 0.61,-2.99 2.06,-6.3 2.13,-8.9 2.02,-4.45 4.64,0.54 7.38,0.4 2.12,0.77 3.39,0.33 4.39,-1.51 2.2,1.04 4.02,-1.17 6.29,-0.61 1.13,-1.59 4.43,0.23 6.29,0.12 1.5,3.66 5.21,-0.2 6.72,2 2.14,1.43 3.1,2.6 2.3,5.16 3.85,1.6 -3.17,3.66 -2.74,6.36 0.38,3.32 -2.26,5.6 -2.13,8.35 1.51,3.41 0.61,7.8 2.24,10.97 1.86,1.39 4,5.34 0.03,4.67 -3.03,-0.07 -4.18,2.35 -3.41,4.97 -0.48,2.25 -0.88,7.37 2.73,5.97 1.08,-0.64 0.96,4.79 -0.18,2.86 -2.42,1.32 -2.68,-3.38 -5.42,-2.94 -0.95,-3.24 -2.96,0.76 -5.34,-1.31 -1.27,-0.74 -0.93,-2.21 -3.01,-0.96 -0.5,-0.98 -1.88,-1.79 -3.82,-0.94 -1.87,0.16 -3.53,0.88 -2.39,-1.55 -1.9,-2.75 -1.01,-6.14 -1.62,-9.07 -1.53,-0.06 -3.51,0.31 -3.49,-1.1 -2.15,0.08 -3.24,0.79 -3.08,3.1 -2.11,-0.79 -4.92,1.62 -5.92,-1.16 -1.4,-2.23 -1.11,-6.26 -5.05,-4.86 -2.81,-0.23 -6.65,0.7 -8.88,-0.26 z",IT:"m 507.51,129.94 c -3.05,-1.56 2.25,-1.9 3.67,-1.24 1.87,-0.1 5.68,-1.88 2.74,1.15 1.11,2.58 -1.33,3.18 -3.09,1.38 -1.28,-0.02 
-2.12,-1.1 -3.32,-1.29 z m -12.37,-8.02 c -1.85,-1.43 1.82,-2.01 2.86,-1.99 1.23,0.55 1.16,4.18 0.05,5.58 -1.7,1 -3.95,0.46 -2.47,-1.54 -0.34,-0.72 -0.13,-1.18 -0.44,-2.05 z m -4.9,-13.17 c 3.1,-0.02 -1.04,-2.76 2.09,-2.35 2.48,-0.19 2.62,-2.39 4.57,0.34 -0.27,-2.86 2.83,-0.83 2.93,-1.72 0.94,-1.14 3.02,-1.32 5.54,-1.83 0.95,1.66 5.23,1.16 4.21,3.41 2.1,1.41 -2.13,-0.39 -3.2,1.12 -0.43,-0.49 -1.01,1.04 0.12,1.66 -2.29,2.57 4.77,3.24 4.6,6.55 0.65,2.38 6.19,0.94 5.12,2.54 0.55,2.04 7.22,1.64 6.66,5.04 -1.28,-1.22 -5.59,-2.89 -4.92,0.65 3.38,1.22 -0.58,2.57 -1.22,4.3 -2.73,1.61 -0.35,-1.81 0,-2.41 -0.55,-2.94 -2.96,-3.35 -4.6,-4.73 -1.02,-1.37 -5.06,-1.66 -6.86,-3.77 -3.53,-0.91 -3.86,-4.73 -7.02,-6.29 -1.56,-2.47 -6,3.28 -5.14,0.18 -3.86,0.28 -0.56,-1.71 -2.89,-2.69 z",SO:"m 585.58,229.11 c 0.69,-4.69 6.2,-7.52 10.68,-7.31 2.28,-1.77 4.01,-4.34 6.21,-6.28 1.31,-1.33 3.8,-3.01 0.32,-2.36 -3.72,-1.45 -8.97,-1.63 -11.19,-5.33 -3.05,-1.74 0.89,-6.67 2.05,-2.34 2.51,2.1 5.36,-0.94 8.17,-0.4 2.93,-1.76 6.73,-0.59 9.82,-2.27 3.25,-3.21 1.61,3.36 2.25,3.39 1.32,-0.12 -2.15,0.54 -0.97,2.35 -1.4,3.16 -3.42,5.74 -4.82,8.9 -1.89,4.51 -5.29,8.36 -9.04,11.49 -4.21,2.3 -7.59,5.75 -10.67,9.35 -0.46,0.71 -1.19,3.14 -2.01,0.66 -1.7,-2.5 -0.43,-5.99 -0.81,-8.9 l 0,-0.47 z",AF:"m 639.74,139.82 c 1.69,-1.45 1.23,-5.93 4.7,-3.54 1.43,1.33 2.67,-1.68 3.45,-1.57 3.5,0.06 3.17,-4.17 6.08,-4.32 2.12,-0.55 5.14,0.72 7.27,1.14 1.63,-1.71 3.03,0.55 3.91,-1.65 2.23,1.22 1.92,-4.15 4.62,-1.79 0.27,1.14 0.98,1.67 0.38,3.63 1.58,2.07 5.54,-3.13 6.54,-0.84 2.18,-0.77 3.22,-0.4 2.08,0.59 -2.94,0.85 -7.6,-0.11 -9.19,2.95 2.67,2.03 -1.37,3.58 -0.54,5.27 -2.16,0.74 -4.42,-0.54 -2.19,2.29 -3.26,-0.32 -1.84,4.85 -4.25,4.39 -1.55,-0.59 -3.3,0.83 -3.34,1.36 -3.63,-1.33 -2.64,3.38 -4.45,4.24 -4.18,0.55 -8.59,1.68 -12.69,0.06 -3.07,-0.31 4.68,-5.14 -1.31,-5.25 -0.69,-1.82 -0.25,-4.72 -0.62,-5.58 -0.62,-0.14 -0.37,-0.99 -0.44,-1.38 z",BD:"m 716.95,161.12 c 0.8,-0.22 1.8,1.17 2.06,0.38 1.27,1.37 2.41,0.19 
2.29,3 1.76,0.89 7.81,-0.72 6.42,1.69 -1.03,1.41 -4.28,2.92 -2.37,4.04 1.32,2.74 1.74,-3.85 2.7,-0.2 -0.07,1.9 2.06,6.05 -0.4,5.62 0.02,3.91 -0.92,-4.11 -1.4,-3.01 -0.46,-2.79 -2.64,1.38 -2.92,-2.19 0.42,-2.1 -0.97,-0.41 -0.61,-0.49 0.43,1.3 -0.12,2.01 0.29,3.26 -0.2,-0.37 -1.91,2.15 -0.98,0.24 -0.49,0.49 -0.68,-0.2 -0.82,-0.86 0.03,2.56 -1.46,1.13 -0.88,0.69 -0.26,0.81 -0.26,-1.15 -0.23,0.9 -0.62,0.75 -0.11,-1.06 -0.79,0.58 -0.52,-2.38 -1.15,-4.56 -1.56,-6.27 1.17,-1.53 -3.4,-2.26 -0.76,-3.09 1.6,-0.72 2.09,-1.06 0.18,-1.93 -2.43,-0.94 1.25,-2 -0.22,-2.35 z m 6.09,10.81 c 0.03,1.88 0.88,2.41 0.6,0.18 -0.25,-0.23 -0.31,-0.96 -0.6,-0.18 z",DO:"m 272.83,184.89 c -0.53,-1.77 0.29,-3.1 0.36,-5.21 2.54,-0.49 5.56,1.5 6.64,1.93 -2.86,-0.43 5.6,2.18 1.12,2.37 -2.45,-0.36 -4.34,0.19 -6.19,0.35 -0.48,1.5 -1.01,2.47 -1.93,0.57 z",GW:"m 425.53,200.98 c 2.38,-0.33 4.57,-1.22 7.02,-1.01 2.59,-0.24 0.04,1.59 1.2,2.64 -2.43,-0.49 -3.7,3.48 -4.57,1.09 0.75,-1.43 0.86,-0.14 -0.24,-1.09 2.76,-0.42 0.44,-0.85 -1.35,0.03 2.05,-1.52 -2.31,0.28 -0.46,-1.61 -0.69,0.1 -0.93,0.55 -1.61,-0.05 z",GH:"m 463.12,218.04 c -0.22,-3.53 2.96,-6.14 1.13,-9.66 -0.36,-2.09 -1.06,-4.54 2.09,-3.68 2.18,0.85 5.88,-1.57 5.79,1.26 1.67,1.44 -0.15,3.04 1.26,3.69 0.13,2.29 0.34,4.75 0.19,7.43 1.73,0.4 1.94,2.6 0.15,2.09 -0.84,-0.97 -1.55,-0.77 0.01,-0.12 -3.22,0.88 -6.4,4.07 -9.73,2.5 -1.54,0.11 1.78,-1.18 -0.32,-1.76 0.11,-0.64 -0.23,-1.24 -0.57,-1.76 z",AT:"m 498.33,102.64 c 0.57,-1.73 2.38,0.79 3.17,-0.71 2.12,0.58 4.66,-0.95 6.64,-0.11 -1.9,-2.04 1.39,-2.14 2.12,-3.4 2.58,1.59 3.61,-1.95 6.56,0.05 3.57,-0.79 2.09,3.65 1.2,3.27 -0.3,2.95 -3.14,2.19 -5.71,3.3 -2.34,-0.32 -6.06,-0.31 -7.08,-1.78 -2.62,0.75 -4.48,0.69 -6.73,-0.02 l 0.01,-0.32 z",SE:"m 503.07,69.37 c 2.29,1.64 0.79,-3.19 3.48,-2.93 1.06,-1.32 -1.91,-2.64 0.84,-2.85 0.63,-1.42 -2.73,-1.22 -1.44,-3.27 -1.05,-2.33 -0.33,-5.19 2.99,-4.95 1.69,0.79 3.22,-1.25 0.83,-1.43 2.34,-0.66 2.48,-3.52 3.11,-4.4 1.67,-0.14 3.98,-2.03 4.49,-3.23 -1.16,-1.42 
3.27,-2.43 4.88,-2.38 -0.36,-2.99 6.56,0.79 5.61,-1.45 0.75,-2.79 6.02,1.11 8.74,1.36 1.76,0.22 -0.56,2.09 1.31,2.27 -1.16,0.73 1.47,1.77 -0.24,2.84 2.9,2.24 -1.35,1.51 -1.95,1.63 -2.18,-0.67 -1.63,1.02 -3.03,0.7 0.16,0.51 -1.39,0.13 -0.93,1.34 -3.45,0.72 2.08,1.59 -1.5,2.46 -1.99,1.99 -4.11,1.55 -6.45,2.85 -1.95,0.25 0.37,0.75 -1.88,0.98 0.4,0.08 -1.63,-1.45 -0.21,0.24 -0.86,0.96 -1.99,0.69 -1.09,1.46 -0.52,2.59 -1.41,0.78 -1.07,2.37 -0.25,3.19 3.46,1.91 4.93,4.05 0.88,1.07 -4.17,1.6 -0.98,1.79 -0.31,-0.45 -1.15,1.75 -2.7,0.64 0.72,1.18 -3.27,1.21 -3.17,1.26 2.66,0.54 -1.56,0.14 0.83,1.06 -0.62,1.31 -0.04,1.66 -1.19,4.34 -0.86,3.05 -6.6,0.05 -5.62,3.58 -2,-0.05 -4.22,0.96 -4.17,-1.25 -1.78,-1.45 1.78,-1.69 -1.27,-3.17 -1.84,-1.15 -1.31,-2.93 -1.87,-3.66 -0.87,-0.06 0.36,-0.36 -0.86,-0.2 -0.39,0.39 -0.79,-1.77 -0.43,-2.02 z m 19.78,5.86 c 1.67,-0.89 2.56,-3.93 -0.46,-1.71 -0.09,0.69 -0.24,2.77 0.46,1.71 z m -5.45,1.61 c 0.61,1.22 3.14,-5 0.72,-1.2 -0.33,0.31 -0.83,0.66 -0.72,1.2 z",TR:"m 544.2,124.61 c -0.19,-3.46 5.6,-2.64 5.36,-2.55 1.31,-0.09 3.86,0.31 2.53,-0.61 2.88,-0.79 2.42,0.17 0.35,-1.24 2.89,-0.92 6.58,0.52 9.35,-1.99 3.33,-0.86 6.8,-1.32 9.09,0.22 1.54,0.31 3.87,1.67 6.25,1.93 3.71,0.27 7.46,-0.25 10.79,-1.28 2,-0.43 3.08,-0.35 4.43,0.78 1.22,1.57 -0.08,3.67 2.78,3.29 2.74,1.29 -2.71,1.04 -0.37,3.34 -0.08,2.03 0.48,2.83 1.38,4.65 -1.79,0.36 -4.56,-1.14 -6.84,-0.22 -3.3,-0.31 -6.8,1.88 -10.22,1.34 -1.74,-0.49 -4.1,0.38 -5.62,0.15 0.58,1.1 -1.19,2.8 -1.8,1.96 -1.11,-0.92 2,-3.14 -0.57,-2.19 -0.84,1.09 -4.26,-0.84 -5.19,1.34 -2.98,3.2 -7.33,-4.03 -9.27,-0.46 -1.59,2.06 -4.09,-0.49 -5.63,-0.92 -1.1,0.37 -1.36,0.57 -1.51,0.13 -4.23,0.77 3.27,-0.76 -0.58,-0.85 -2.1,0.18 -1.05,0.28 -0.63,-0.64 -1.89,-0.4 -0.01,-2.04 -2.33,-2.61 -1.69,0.79 -0.65,-2.62 0,-0.43 2.24,0.08 -0.69,-0.86 0.78,-1.55 -1.97,-1.18 1.52,-2.33 -2.52,-1.57 z m -0.08,-3.56 c 2.53,-1.48 -0.23,-4.36 3.83,-3.6 1.87,-0.15 2.85,1.97 4.5,2.55 -2.73,0.01 -5.63,0.78 -7.84,2.96 0.29,-1.45 3.13,-1.91 
-0.16,-1.53 l -0.21,-0.12 z",UG:"m 553.91,238.9 c -0.39,-4.17 2.3,-6.77 4.75,-9.42 -2.41,0.04 -1.63,-5.56 0.75,-4.17 1.7,0.21 3.32,-0.39 5.57,-0.42 2.14,-2.38 2.72,2.71 4,4.52 0.43,3.24 -4,4.5 -2.99,8.06 -1.04,2.18 -5.96,0.5 -8.56,0.92 -1.39,0 -2.88,2.34 -3.52,0.5 z",MZ:"m 555.68,277.82 c 3.38,-0.51 7.85,-4.21 10.07,-1.42 4.51,-0.87 -0.98,5.86 3.59,6.74 0.61,2.22 -0.65,-2.7 1.56,-2.32 2.53,-3.74 -2.44,-6.16 -3.24,-8.98 -1.9,-4.36 3.49,-4.31 5.89,-3.25 1.32,-0.53 2.84,0.44 3.87,-1.29 2.26,1.88 7.76,-4.55 6.8,-0.28 -0.52,2 0.53,4.58 0.1,6.64 0.2,2.16 0.97,3.01 0.4,4.25 -0.43,1.98 -3.01,5.04 -5.57,5.78 -3.19,1.08 -5.18,2.18 -6.33,4.8 -1,-0.6 -4.36,4.09 -4.82,2.41 0.15,2.61 1.46,4.11 1.84,6.62 -0.17,2.65 1.06,-1 0.49,1.71 0.65,1.67 -1.08,3.66 0.13,3.82 -0.58,3.45 -10.01,3.24 -7.47,6.38 2.13,1.12 -2.26,3.35 -1.99,0.39 -0.65,-3.17 -0.44,-6.79 -1.88,-10.02 -0.89,-2.83 3.92,-3.48 3.06,-6.29 2.7,-1.94 -0.57,-4.19 1.33,-6.02 -0.33,-2.62 0.82,-5.75 -2.86,-5.66 -2.16,-2 -5.13,0.08 -4.95,-4.02 z",JP:"m 856.18,127.94 c -1.84,1.01 0.27,2.23 0,0 z m 8.64,-20.15 c 0.38,2.34 0.31,7.64 -3.24,5.92 0.46,1.55 -3.12,2.39 -0.87,4.1 -1.78,2.55 5.43,-0.24 1.23,-0.76 -1.88,-2.01 2.04,0.05 3.04,-1.31 1.98,0.42 4.65,3.42 5.48,-0.05 1.5,-1.31 5.08,-0.78 5.48,-2 -1.46,0.31 -1.51,-1.51 -0.94,-2.78 -2.12,2.33 -5.03,0.1 -7.2,-1.25 -0.96,-0.69 -1.68,-2.33 -2.98,-1.87 z m -25.3,33.26 c -1.96,1.25 -0.79,0.12 0.08,1.06 -0.66,1.4 2.1,2.1 2.13,-0.17 1.59,-0.98 2.83,0.63 3.83,-1.61 -0.73,-3.04 -3.47,0.43 -4.98,-0.63 -0.33,0.44 -0.48,1.13 -1.04,1.36 z m -4.43,-0.85 c 1.96,-0.48 3.46,0.68 4.31,-1.17 2.06,0.38 3.71,-0.69 5.65,-1.14 1.06,0.33 3.92,-0.05 1.52,1.29 -0.04,2.5 3,2.91 3.71,0.24 2.76,0.37 -1.03,-2 1.2,-2.22 -0.8,1.25 2.05,-0.16 0.73,1.18 2.25,-0.07 3.07,-0.2 4.69,-1.45 -0.84,1.6 1.08,1.49 1,-0.46 1.69,1 1.25,-1.62 2.53,-0.79 -2.34,2.74 0.98,1.33 1.51,-0.29 0.85,-0.39 -0.74,-3.11 1.07,-3.95 0.41,-1.16 -0.91,-4.42 1.52,-3.5 0.7,-2.16 2.14,-4.91 -0.38,-6.96 1.19,-1.59 -1.49,-2.88 -1.55,-1.2 2.79,-0.41 
-0.65,2.12 -0.91,-0.01 -0.96,0.48 -1.8,1.83 -1.8,3.26 -1.26,0.07 2,1.61 -0.15,3.1 -1.01,2.68 -5.58,6.7 -7.62,5.5 -0.92,-0.68 2.17,-2.09 -0.47,-1.18 -0.41,1.88 -2.34,3.69 -2.79,5 -2.16,1.03 -1.9,-1.28 -4.8,0.22 -3.81,-0.99 -5.72,2.44 -9.06,3.44 l -0.04,0.57 z m -3.05,1.69 c -1.65,0.19 1.8,1.81 -0.39,1.11 1.03,1.15 2.45,0.91 1.24,-0.52 3.49,0.55 -2.05,4.9 1.23,5.48 -0.19,-1.7 1.19,-1.77 0.25,0.42 2.1,-0.6 3.06,-4.2 3.57,-5.74 -1.92,-0.54 0.47,-2.18 -2.22,-1.51 -0.75,-2.25 -2.9,0.43 -3.68,0.77 z m -5.99,20.39 c 1.34,-1.15 2.28,-2.78 0.65,-1.35 -0.25,0.41 -0.91,0.79 -0.65,1.35 z",NZ:"m 980.38,359.42 c 1.59,0.63 -0.23,-0.77 1.18,-0.76 -2.77,0.04 -0.77,-0.16 -1.18,0.76 z m -23.67,-7.74 c 2.14,2.42 4.24,-1.63 5.73,-3.43 -0.09,-1.29 0.81,-3.97 2.77,-2.23 0.11,-1.33 3.61,-5.66 -0.04,-4.42 -2.7,2.86 -6.03,-1.77 -6.17,-2.62 -1.34,-2.4 0.57,3.23 -1.32,0.45 -1.54,0.37 -1.17,-2.95 -2.07,-3.17 1.31,0.47 -1.09,-2.61 -0.89,-1.21 -0.64,-1.78 -3.1,-0.5 -3.03,-2.47 -1.58,-0.97 0.72,3.28 1.2,2.63 -0.84,-0.87 2.64,4.36 1.51,2 1.37,-0.12 1.56,1.23 0.71,1.18 0.74,1.83 2.19,0.91 1.26,2.12 0.85,1.57 0.3,5.01 -2.42,5.22 0.8,1.83 6.18,3.35 2.38,6.16 h 0.35 z m -23.16,13.34 c 1.43,-0.64 -0.04,1.31 2.71,0.51 2.11,0.86 4.97,2.3 7.18,0.16 2.23,-1.12 1.62,-1.84 3.08,-4.28 1.22,-2.98 4.59,-2.2 5.41,-2.8 -2.75,-1.32 2.77,-3.59 3.2,-5.59 -1.26,-1.38 1.75,-1.87 -0.72,-1.42 1.64,-1.35 -1.32,0.96 -0.13,-0.9 -1.84,1.5 -2.02,0.63 -3.02,-0.32 -1.28,-0.72 1.64,-1.16 -0.99,-0.6 -1.6,2.24 -2.96,4.34 -4.78,6.58 -3.04,1.83 -7.46,3.44 -9.11,5.24 -1.94,0.98 -0.67,1.81 -2.02,1.88 -1.19,0.79 2.01,-0.37 -0.12,0.44 1.48,0.34 -1.01,0.13 -0.68,1.1 z m 4.23,2.52 c 0.39,-1.85 -2.66,1.98 0.05,0.67 1.38,0.02 -0.1,-0.55 -0.05,-0.67 z", CU:"m 241.98,175.35 c -2.12,-0.41 1.52,-2.41 0.61,-0.14 z m -5.53,-1.26 c 2.2,-0.06 1.37,-3.05 4.41,-3.05 3.72,-0.8 5.87,-0.72 9.13,0.22 1.3,1.13 4.9,1.77 6.58,3.1 1.31,0.86 -0.15,-0.99 1.4,0.57 -0.58,0.42 2.65,0.95 4.2,1.75 -1.67,0.65 7.22,1.75 2.29,2.73 -2.01,0.36 -5.38,-0.1 -8.08,0.44 
1.59,-1.04 2.34,-2.98 -0.59,-2.44 -1.98,-0.84 -1.67,-3.44 -4.25,-2.54 -2.82,-1.76 -5.11,-1.09 -7.22,-2.28 3.23,-0.68 -3.4,-1.54 -3.71,0.57 -1.75,0.07 -2.86,1.63 -4.15,0.93 z",VE:"m 268.42,209.82 c 0.97,-2.58 3.12,-7.68 5.32,-7.2 -3.36,0.3 1.55,3.51 -1.64,5.08 -0.12,3.73 4.58,2.02 1.93,-0.83 -2.24,-2.86 4.14,-3.18 4.38,-3.78 -2.75,0.43 -0.26,-3.72 0.01,-0.24 1.91,0.5 4.17,1.08 4.61,3.31 3.18,-1.09 6.18,0.35 8.7,1.24 2.75,-1.2 3.12,-1.35 2.53,-1.66 1.61,-0.19 8.79,-0.45 3.86,0.26 -2.39,-0.24 1.52,1.3 -0.96,1.24 1.21,-0.35 1.63,0.38 2.13,0.76 -0.13,0.15 0.68,0.47 0.21,-0.33 0.87,0.79 2.07,-0.05 3.73,1.72 -0.17,0.75 -2.12,2.37 -1.78,2.29 0.79,0.3 6.07,-1.08 3.6,1.46 -3.33,1.26 1.48,3.28 -2.31,3.6 -1.81,1.84 0.24,3.32 0.99,5.23 -1.62,1.67 -4.02,1.65 -5.92,2.58 0.24,2.11 -2.51,-0.76 -3.65,-0.22 -3.92,-1.92 0.79,2.1 0.24,4.27 3.91,0.06 -0.98,2.09 -1.1,2.94 -1.76,0.26 -2.96,3.24 -3.63,1.35 -3.83,2.2 -3.61,-4.88 -5.84,-5.46 2.84,-1.65 -2,-5.15 0.54,-7.64 1.68,-4.03 -5.54,0.73 -6.18,-2.84 -1.67,-2.55 -5.64,0.37 -7.24,-2.33 0.7,-1.68 -0.57,-5.67 -2.53,-4.79 z",PT:"m 445.98,125.1 c 1.66,-1.86 2.37,-5.18 1.31,-6.81 0.24,-1.34 2.63,-1.51 2.05,-0.28 2.16,-1.06 7.75,0.04 3.38,2.22 0.21,1.55 0.62,4.19 -1.74,3.81 2.54,1.22 -0.16,3.54 1.44,4.41 -1.67,1.35 -1.15,4.11 -4.28,2.74 -2.59,1.02 1.31,-5.72 -1.84,-3.62 0.53,-1.15 1.43,-1.51 -0.56,-0.75 -0.57,-0.46 0.26,-1.17 0.24,-1.72 z",CO:"m 252.95,231.17 c -0.47,-0.82 1.85,-0.35 0.81,-1.95 2.5,-0.46 3.03,-2.26 4.08,-4.06 -0.63,-0.77 -0.28,-1.72 -0.65,-4.65 1.16,-2.12 -1.46,-4.1 -0.91,-6.37 2.5,1.42 -0.06,-4.87 1.91,-1.82 1.43,2.74 -0.39,-2.04 2.18,-2 2.56,-1.09 1.18,-3.33 2.83,-4.85 0.82,-1.6 2.69,-1.01 2.08,-0.06 1.3,-2.42 5.88,-1.87 7,-4.17 0.13,-0.9 4.12,0.24 1.1,1.26 -2.95,0.87 -4.41,5.94 -4.39,7.25 2.84,-0.06 0.85,5.72 3.84,6.14 3.15,-0.97 5.11,0.97 7.27,2.49 2.47,-0.41 6.43,-0.86 3.69,2.39 -0.77,2.77 2.44,5.6 0.14,6.98 2.41,-0.07 2.68,7.45 1.64,2.71 -0.67,-1.62 -2.32,0.65 -2.92,-0.01 -1.16,1.25 -5.74,-1.2 -4.46,2.22 2.38,-0.47 
2.33,1.82 0.01,1.18 -1.81,2.49 3.12,5.09 0.67,8.27 -0.55,2.31 0.13,6.66 -2.61,4.32 -1.04,-0.94 3.03,-3.69 -0.15,-3.93 -2,-1.89 -6.61,2.13 -7.17,-1.54 -1.85,-1.51 -3.21,-3.79 -5.46,-5.19 -2,-0.1 -3.19,-1.6 -5.33,-1.01 -1.52,-1.26 -3.67,-1.92 -5.19,-3.56 z",MR:"m 424.61,177.22 c -0.18,-2.88 3.65,-1.21 5.47,-1.61 1.9,-0.07 3.87,0.15 5.71,-0.11 -0.74,-2.93 -0.09,-5.47 2.88,-6.22 0.32,-2.63 -1.73,-8.14 2.76,-6.78 2.11,-0.06 4.26,0.13 6.34,-0.11 0.26,-2.03 -0.54,-4.73 2.1,-2.35 2.48,1.6 7.23,3.93 8.25,5.27 -1.49,0 -2.98,0 -4.47,0 1.24,7.82 1.65,15.77 2.77,23.59 2.5,5.12 -5.53,2.42 -8.24,3.23 -2.38,-0.56 -5.24,0.51 -6.98,0.52 -2.36,-3.17 -1.71,3.69 -3.97,0.76 -2.4,-2.45 -4.97,-5.97 -9.03,-4.15 -1.92,-0.86 -2.62,2.9 -1.79,-0.59 2.27,-2.58 -0.3,-7.76 0.03,-8.08 1.44,-2.05 -1.06,-4.04 -1.7,-3.88 z",AO:"m 505.24,249.73 c 1.06,-2.15 4.38,-1.73 1.43,-0.01 -0.06,3.73 -1.21,1.68 -1.43,0.01 z m -0.75,34.43 c -0.43,-3.48 1.32,-6.18 1.89,-9.52 0.53,-3.17 4.76,-4.24 3.86,-7.99 -0.67,-2.52 -3.67,-6 -1.22,-6.6 0.42,-2.56 -3.79,-5.84 -2.23,-7.43 3.48,-0.43 8.63,-1.53 11.43,0.41 0.16,3.55 2.62,6.51 6.32,5.06 2.05,-0.06 0.7,-3.78 3.81,-3.01 0.94,0.03 0.46,1.77 2.4,1.03 3.94,-0.19 -0.04,5.91 2.74,7.85 0.65,1.46 -1.06,4.29 1.57,2.75 3.2,-0.66 3.98,-0.14 3.38,3.14 1.5,5.03 -7.79,-0.5 -5.61,5.38 -0.48,3.98 0.1,8.09 4.15,10 -3.67,0.81 -7.28,1.24 -10.98,0.58 -3.36,-1.38 -6.89,-1.35 -10.78,-1.21 -4.13,0.37 -6.81,-1.83 -10.75,-0.44 z",DE:"m 510.16,83.58 c 0.63,-0.95 1.7,1.39 0.28,0.42 -0.06,-0.51 1.11,-0.31 -0.28,-0.42 z m -1.91,-0.81 c 0.29,-1.17 1.74,-0.71 1.27,0.04 -0.42,0 -0.91,0.39 -1.27,-0.04 z m -13.09,-1.84 c -0.92,1.13 0.26,0.4 0,0 z m 0.4,0.28 c -0.93,0.06 0.94,0.06 0,0 z m 1.14,0 c -0.85,-0.17 0.1,1.88 -0.49,1.71 -0.35,0.34 2.73,2.01 2.47,1.98 -1.79,-2.3 -3.58,0.65 -3.51,0.01 -0.07,0.76 -4.12,-1.38 -3.41,0.7 1.74,0.46 -2.97,2.47 -0.29,2.83 -0.71,2.07 -4.39,0.32 -2.35,2.67 -1.72,1.83 1.3,2.48 0.02,4.05 0.58,1.79 2.43,2.59 4.8,2.63 1.09,-0.33 -3.22,5.13 0.68,4.01 1.11,-1.14 4.44,0.48 
6.08,0.54 1.81,-0.1 5.13,-1.41 7.28,-0.3 -1.19,-1.95 0.91,-2.39 2.28,-3.45 -1.92,-1.32 -6.51,-4.24 -3.32,-4.71 2.63,-0.73 4.66,-1.76 6.27,-1.77 -0.5,-1.5 -0.12,-3.85 -1.78,-4.92 1.1,-1.9 -1.09,-3.73 -2.81,-4.21 -0.88,-0.97 -3.87,0.28 -1.14,-0.43 -2.35,0.63 -3.33,1.16 -5.29,1.44 -0.74,-0.78 1.55,-1.66 -0.99,-1.3 -2.62,0.04 -0.5,-1.54 -4.5,-1.47 z",SD:"m 570.73,170.57 c -2.63,0.94 -4.06,5.29 -7.24,3.16 -3.38,0.23 -4.32,-0.56 -7.36,0.02 -4.97,0 -9.94,0 -14.9,0 -0.25,2.4 1.29,6.69 -2.45,5.63 -1.09,3.03 0.86,9.72 -1.01,11.93 -3.25,-0.03 -3.01,4.18 -4.46,5.78 1.11,1.37 -2.43,3.26 0.78,2.88 -0.09,2.13 1.2,3.44 1.72,5.6 3.53,1.87 -0.67,6.52 3.46,5.66 1.28,-1.28 0.81,-3.4 2.08,-4.86 2.28,-3.07 2.9,3.17 5.53,1.67 2.07,-0.75 4.03,2.41 5.57,-0.39 1.7,-0.24 2.31,-3.24 4.22,-0.43 2.37,0.91 4.6,-3.2 4.67,-5.07 -0.08,-1.6 -1.33,-1.84 0.95,-1.89 0.3,-1.87 2.28,-0.18 1.05,1.27 -0.14,2.63 2.85,4.67 2.53,6.51 1.7,-0.03 1.07,-3.87 2.87,-2.76 -0.47,-3.3 3.08,-4.68 3.64,-6.92 1.05,-3.04 0.78,-6.4 1.91,-9.4 0.5,-2.13 7.1,-3.34 2.97,-5.26 -3.39,-1.34 -1.59,-6.55 -2.49,-8.22 -0.46,-2.5 -2.62,-3.15 -4.02,-4.91 z m -31.67,40.88 c -0.81,0.98 0.37,0.36 0,0 z",TH:"m 741.93,183.35 c 2.23,0.61 0.18,-4.66 3.68,-3.29 1.64,-0.78 3.08,-2.49 4.83,-1.43 -0.46,2.03 0.96,2 2.18,2.36 0.78,1.75 -2.19,7.33 1.32,4.33 0.93,-1.95 3.82,1.42 4.39,-1.39 3.6,-0.35 4.08,3.63 4.74,5.91 2.02,0.95 2.14,4 0.43,5.64 -2.29,-0.61 -6.2,-1.05 -7.24,1.99 -1.45,0.48 2.75,7.69 0.09,3.84 -0.68,0.12 -2.15,-2.05 -4.64,-1.33 1.64,-3.49 -3.45,-2.96 -2.56,0.21 0.04,3.19 -3.25,5.95 -2.05,9.21 2.48,-0.33 3.27,4.51 3.3,5.61 -1.87,-3.06 0.99,2.69 3.08,1.41 2.88,1.99 0.28,3.65 -1.46,2.95 1.04,-1.79 -2.61,-2.53 -2.74,-2.3 -1.12,-1.85 -1.8,-2.07 -3.03,-3.96 -0.79,-1.73 -2.47,0.52 -1.87,-2.29 -0.38,-4.12 5.04,-6.66 3.14,-10.45 -0.44,-2.75 -1.62,-4.86 -3.33,-7.09 1.02,-0.93 1.38,-2.68 1.89,-3.94 -0.71,0.38 -1.61,-2.78 -3.04,-3.56 0.11,-1.31 -0.76,-1.44 -1.12,-2.41 z",AU:"m 791.49,330.29 c 2.22,-0.61 0.81,-3.23 1.35,-3.26 0.11,-3.48 
-2.66,-6.34 -2.72,-9.87 -1.04,-2.38 -3.46,-6.91 -3.96,-7.92 -0.32,-0.75 1.81,2.65 1.06,0.08 -1.89,-1.68 0.38,-2.2 0.04,-0.08 0.58,-1.11 1.32,2.13 1.48,-0.5 -1.84,-2.55 -3.18,-5.44 -1.22,-8.35 -1.21,-1.64 1.49,-5.18 0.83,-1.49 2.25,-2.24 5.68,-5.3 8.69,-5.12 3.16,-0.68 5.84,-2.23 9.27,-2.56 2.63,-0.92 3.32,-3.52 4.69,-5.04 -1.41,-1.75 2.24,-5.39 2.3,-3.34 1.25,2.77 1.12,1.55 1.8,0.85 1.47,-0.05 -1.6,-1.49 -0.25,-2.15 -0.62,-0.78 2.79,0.71 2.77,0.08 -1.8,-0.46 1.11,-1.31 -0.78,-1.83 0.35,-2.27 2.85,0.37 1.53,-1.36 -0.71,-0.63 2.12,0.16 0.7,-1.33 0.83,-0.19 1.58,-2.32 1.2,-0.37 1.82,0.48 0.79,-3.25 1.65,-1.29 0.67,-0.2 1.52,-0.17 2.1,-1.11 2.29,0.21 3.67,4.58 3,4.35 0.52,0.45 0.86,-2.72 2.97,-0.78 0.29,0.69 0.33,-1.37 1.29,0.37 -0.4,-0.52 1.58,-1.1 -0.06,-1.51 0.74,0.21 -1.69,-1.36 0.43,-1.8 -0.06,-1.85 2.25,-1.31 1.33,-2.93 1.48,-0.55 0.59,-2.1 2.15,-0.89 -0.94,-2.38 3.04,-0.45 4.05,-1.18 1.86,-0.94 -0.62,-2.21 -1.7,-2.65 1.28,0.17 0.77,-0.92 2.45,0.71 0.63,-0.77 2.6,1.36 3.21,0.84 1.4,0.66 3.93,1.65 4.79,0.4 1.96,-1.3 -0.68,1.49 1.03,0.73 1.07,1.52 1.41,-2.07 2.08,-0.07 1.34,0.47 -1.12,2.28 -0.55,2.66 -1.11,-0.21 -2.16,0.86 -1.46,1.97 -1.19,1.83 -2.53,3.38 0.37,4.36 2.4,1.97 5.27,3.02 8.25,4.58 2.66,4.45 7.1,-0.33 6.64,-4.26 0.64,-2.11 -0.6,-7.36 1.17,-7.41 -1.54,-1.68 0.84,-2.32 0.7,-5.3 1.91,-2.41 1.62,2.91 2.73,3.04 1.28,1.63 0.3,6.44 2.73,6.53 1.46,-0.82 3.69,1.89 3.32,3.67 0.61,3.17 2.37,4.32 2.29,7.52 0.2,1.62 1.99,3.14 3.41,2.71 0.65,1.97 2.53,1.64 3.8,2.69 0.47,0.26 -0.89,1.6 0.83,1.83 1.17,1.86 1.88,5.94 2.95,3.73 1.84,0.39 1.6,1.25 2.07,3.57 1.45,-0.32 0.24,1.18 2.14,1.3 1.76,1.71 3.34,4.29 4.26,5.55 -1.36,3.4 2.44,5.95 0.59,9.3 -1.26,3.24 -0.54,7.74 -3.5,9.71 -1.67,0.98 -2.22,2.93 -3.4,4.84 0.03,2.08 -1.93,2.42 -2.2,5.28 0.47,3.49 -1.89,4.72 -5.46,4.37 -1.69,0.74 -4.28,2.3 -5.05,2.95 1.38,0.61 -2.78,-0.25 -2.36,-1.91 -1.21,0.89 -2.66,0.73 -0.99,-0.26 -1.04,-1.73 -1.87,0.42 -1.43,0.41 -2.75,3.08 -5.83,-0.71 -8.88,0.26 -2.22,-1.14 -5.08,-2.7 -4.63,-5.8 
-1.21,-1.52 -3.23,-2.95 -0.52,-0.82 1.39,1.17 -2.72,-2.42 -0.62,-1.09 -0.06,-2.24 -2.63,1.53 -3.09,-0.47 2.22,-1.63 -1.2,-5.85 -1.29,-1.34 -1.47,1.42 -3.85,0.01 -0.79,-0.32 -0.06,-2.15 2.52,-5.48 0.97,-6.19 -0.45,2.95 -4.21,3.34 -5.39,5.77 1.68,1.61 -3.7,-0.88 -1.29,-0.71 -0.16,-2.72 -2.99,-3.32 -3.23,-4.96 0.26,-0.87 -2.59,-1.94 -4.61,-2.05 -2.96,-1.22 -6.47,-1.72 -9.95,-0.76 -3.74,2.19 -8.2,0.83 -11.77,3.34 -2.38,0.79 -3.1,4.24 -6.11,2.78 -2.76,0.09 -7.41,-0.79 -9.12,1.7 -2.95,0.97 -5.53,2.7 -8.77,0.95 -0.79,-0.98 -5.26,-2.21 -2.32,-3.31 z m 90.47,17.83 c 1.93,2.47 0.52,-2.1 0,0 z m -11.14,0 c 1.48,1.07 0.1,-2.86 0,0 z m 9.64,7.83 c 0.39,1.2 1.83,-0.28 1.11,0.95 1.34,0.97 0.48,-4.13 1.87,-2.77 0.18,-2.25 -0.41,-4.84 -2.96,-3.2 -1.65,1.37 -5.44,-1.05 -7.36,-0.47 -0.42,2.13 3.78,5.1 1.59,4.31 -0.12,2.14 3.71,2.53 2.25,3.28 1.32,0.04 2.92,0.54 2.84,-1.03 0.68,0.92 1.13,-0.65 0.67,-1.08 z m -27.37,-18.87 c 4.04,-0.72 -1.41,-1.98 -2.43,-0.45 0.61,0.92 1.68,-0.14 2.43,0.45 z m -17.24,-69.43 c -1.28,-0.58 -3.73,-0.24 -0.99,1.44 1.71,0.6 2.21,-2.97 0.99,-1.44 z m -2.05,1.26 c 0.84,-1.66 -2.21,-0.8 -0.55,-0.1 z",PG:"m 878.44,241.91 c 0.38,-1.94 3.55,0.33 0.52,-0.28 l -0.27,0.12 z m 23.31,11.21 c 0.44,2.12 4.23,2 1.66,-0.11 -1.33,-1.54 -3.89,-3.41 -1.66,0.11 z m -6.43,-6.65 c -1.02,-1.69 -7.66,-4.86 -3.27,-2.06 2.63,0.3 3.1,3.93 4.06,4.53 0.86,-0.9 0.27,-2.19 -0.79,-2.47 z m -5.01,17.27 c 1.29,0.67 1.17,-0.09 -0.41,-0.7 l 0.25,0.34 z m -5.57,-11.37 c 2.18,1.61 6.09,0.82 7.63,-1.15 2.41,0.24 0.05,-1.92 2.11,-2.38 0.29,-2.22 -3.39,-1.69 -2.14,0.63 -1.15,0.47 -4.02,3.23 -4.25,0.64 0.41,-0.42 -1.53,2.45 -3.13,1 -2.21,-0.35 -2.21,0.62 -0.21,1.26 z m -21.82,8.94 c 0.3,-3.07 -0.44,-6.05 0.01,-8.89 0.2,-2.99 -0.51,-7.15 0.5,-9.44 2.67,1.59 5.78,2.02 8.42,3.39 2.57,0.46 4.08,3.15 5.01,4.82 1.45,0.26 7.22,2.26 4.05,3.42 -3.79,-0.23 1.5,3.53 1.92,4.33 0.26,1.95 2.05,2.12 3.05,2.48 -1.12,1.72 3.73,0.92 1.16,1.82 0.85,1.51 4.63,0.93 1.81,1.61 2.04,1.17 -1.89,0.41 -1.22,0.26 -2.15,-1.44 
-4.95,-0.07 -6.75,-1.87 -2.29,-1.7 -3.02,-5.59 -6.68,-5.64 -1.76,-1.77 -1.54,0.89 -3.91,-1.05 1.21,1.3 0.71,2.14 -0.81,1.29 2.25,2.18 -6.1,0.35 -0.96,1.49 2.94,2.84 -3.46,2.3 -5.06,2.15 l -0.26,0.03 z",IQ:"m 579.52,141.76 c 3.35,-1.23 8.01,-3.06 6.94,-7.36 0.04,-1.98 3.23,-3.11 4.84,-3.85 2.48,0.07 3.33,0.76 5,0.95 -0.02,2.44 3.84,3.56 3.45,3.77 0.58,1.72 -1.45,2.49 -1.72,3.64 -0.51,1.59 1.97,2.82 2.09,4.02 2.7,0.67 5.14,2.81 4.32,5.55 0.65,0.26 3.36,4.41 0.38,2.48 -3.04,-0.74 -3.3,4.74 -7.03,2.77 -3.58,-0.26 -5.88,-3.65 -8.86,-5.36 -2.74,-1.21 -5.2,-3.27 -8.37,-3.12 0.26,-0.46 -0.96,-2.2 -1.04,-3.49 z",HR:"m 520.86,114.99 c 1.93,-0.01 2.97,2.11 0.72,0.38 -1.74,-0.36 -3.2,-1.21 -0.72,-0.38 z m -11.29,-7.28 c 2.63,-0.58 5.72,0.79 5.65,-1.9 2.54,-2.52 4.79,1.54 8.03,1.02 1.28,-0.8 1.93,2.07 2.02,1.81 -1.06,1.36 -4.28,-0.31 -6.63,-0.24 -1.29,0.48 -4.75,-0.22 -2.16,1.72 0.08,2.02 4.02,3.27 4.12,4.66 -1.62,-1.77 -6.82,-1.63 -6.22,-3.67 0.49,0.88 -2.49,-4.97 -3.21,-2.02 -0.96,1.61 -1.74,-0.94 -1.6,-1.37 z",GL:"m 350.08,66.85 c -1.04,-0.38 1,-0.77 2.1,0 -1.44,-0.58 -0.63,0.6 -2.1,0 z m -72.85,-47.31 c 1.2,0.31 8.15,0.27 3.75,0.53 -1.87,-0.15 -3.55,0.83 -0.86,1.06 1.35,0.34 5.81,1 6.17,0.53 -3.96,-0.92 2.94,-0.21 2.81,-0.39 1.66,0.42 4.52,-0.36 6.93,-0.46 3.5,0.33 7.71,0.64 10.68,1.36 0.53,0.15 4.8,0.51 2.25,1.44 1.17,0.27 4.31,1.26 4.77,1.66 -0.04,0.42 3.72,0.85 0.63,0.9 2.78,0.13 -0.9,0.33 1.2,0.41 -1.34,0.2 -4.2,0.74 -1.17,0.47 1.83,-0.47 1.52,0.57 3,1.12 -3.5,-0.1 3.32,0.98 0.44,1.23 0.2,0.59 -1.07,0.55 1.18,0.62 0.86,0.88 1.41,0.65 0.08,1.39 4.92,0.42 -4.31,-0.02 -0.48,0.47 3.36,-0.43 -3.71,1.92 0.82,1.18 3.77,-0.89 -6.22,0.85 -0.86,1.72 0.07,0.06 4.32,0.05 3.55,-0.77 1.03,-0.13 1.48,-0.84 0.16,-1.91 0.56,-0.73 1.05,2.76 3.04,0.96 -4.13,1.84 6.46,0.3 1.37,1.29 -5.64,0.62 6.11,-0.2 0.75,0.31 -3.73,1.41 4.05,-0.34 -0.45,0.78 2.56,0.1 4.76,0.57 0.64,0.35 2.44,0.47 4.16,0.73 1.98,0.92 0.37,0.07 2.86,0.24 0.86,0.19 2.05,0.68 -3,-0.32 -0.02,0.48 -0.73,0.11 2.39,-0.09 
0.34,0.39 -2.79,-0.54 -9.21,-2.3 -9.88,-0.72 2.87,1.02 8.68,1.77 9.51,1.79 2.13,-0.68 2.67,0.57 0.72,0.84 2.12,-0 -2.01,0.41 0.87,0.35 0.74,-0.44 -3.56,1.46 -0.75,0.75 2.42,3.6e-4 -0.62,0.59 1.22,0.73 -1.18,-0.49 -3.54,0.02 -2.16,0.67 1.93,0.24 -3.47,0.94 -2.71,0.41 -1.98,0.07 0.72,0.04 -1.67,0.31 0.82,0.26 -3.9,0.81 -0.58,0.46 -1.15,0.35 5.51,0.55 3.23,-0.24 1.62,0.13 2.65,-0.58 0.87,0.49 2.03,0.31 -1.81,0.02 1.04,0.48 2.46,0.43 2.02,0.59 -0.03,0.17 -2.22,-0.06 -5.34,-0.54 -4.8,-0.28 -3.64,-0.76 -0.29,0.72 1.46,0.45 -1.66,0.11 -3.3,-0.49 -2.82,-0.03 -0.4,0.14 -3.02,1.14 -0.29,0.96 2.47,-0.86 5.66,-1.26 3.09,-0.46 1.96,0.56 1.48,-0.58 3.2,-0.4 -2.02,0.32 0.8,0.61 0.77,0.07 3.15,0.31 -4.4,0.6 -0.14,0.68 3.01,-0.57 -2.16,0.45 1.19,0.45 -0.94,-0.05 -7.09,-1.23 -7.74,-0.39 -2.6,0.41 -2.17,1.61 0.28,0.88 1.75,0.09 5.92,-0.72 4.59,0.2 1.54,0.42 3.58,0.01 0.77,0.39 -2.16,-1.22 -8.38,-0.3 -7.04,0.09 1.57,-0.21 -1.61,0.22 0.89,0.23 -3.99,0.14 3.61,0.25 -0.16,0.39 0.18,-0.45 5.35,0.02 1.5,0.07 -1.54,-0.11 1.33,0.22 -0.73,0.31 3.14,0.03 -3.4,0.19 0.51,0.59 3.74,-0.33 -5.46,0.38 -0.54,0.69 -3.31,1.06 2.91,-0.19 3.76,-1.33 1.7,-0.44 4.12,-1.25 4.01,-0.7 1.32,-0.07 -4.05,0.36 -0.24,0.42 -2.39,-0.51 -7.95,2.07 -7.65,2.35 -1.96,0.45 2.19,-0.23 -0.21,0.3 1.56,0.11 6.15,-0.7 2.21,0.16 -2.55,-0.08 -1.9,0.59 -0.59,0.43 -0.34,0.72 0.51,-0.06 0.44,0.86 1.23,-0.68 5.28,-1.43 4.94,-0.74 -1.83,-1.11 -6.89,1.86 -3.64,1.1 -1.78,2.76 4.25,-0.2 0.3,1.38 -1.43,1.79 1.29,1.85 1.96,0.1 0.04,0.88 2.46,-0.1 0.7,-1.29 0.52,0.38 2.46,1.52 2.4,1.49 0.24,0.24 2.44,1.69 -0.14,0.55 -1.82,-1.32 -1.75,0.17 -0.32,0.23 -2.2,-0.1 -1.68,0.91 -3.99,0.61 1.13,-0.08 -0.22,0.55 1.7,0.33 1.58,-0.67 2.43,-0.51 2.43,-0.26 -1.74,-0.19 -5.32,0.95 -2,0.68 -3.84,0.82 0.54,1 0.56,0.78 -2.51,0.25 1.34,0.26 -1.27,0.55 3.05,0.5 1.1,-0.32 0.53,0.6 0.51,0.39 3.05,-0.58 1.27,0.6 1.22,0.05 0.04,0.72 1.31,0.16 -1.85,1.31 1.85,-0.48 -0.52,0.92 0.11,1.44 2.65,0.9 2.58,1.43 -2.95,0.92 3.57,-0.74 -0.06,0.77 0.14,0.79 2.09,-1.25 
0.62,0.38 1.12,-0.55 -0.32,0.77 1.3,0.29 -4.1,0.51 3.79,0.01 -0.63,0.5 -0.06,0.32 1.66,0.07 1.2,0.74 0.75,0.37 1.48,-1.01 0.94,0.2 2.49,-0.05 -2.28,0.69 0.27,0.36 2.28,-0.26 -2.45,0.8 0.68,0.53 2.38,-0.21 -1.41,-0.27 1.14,-0.36 2.09,0.46 3.09,-0.42 4.44,-1.05 -0.6,1.36 -0.31,0.74 1.2,0.51 0.81,0 -3.01,0.96 -2,0.88 -0.67,0.72 3.94,-1.14 0.83,0.32 -0.23,0.43 -0.42,0.26 0.19,0.32 0.53,0.59 2.01,-1.02 0.68,0.26 0.53,0.01 1.7,-0.49 1.79,-0.24 -2.69,2.52 2.84,-1.05 -0.42,1.05 -0.16,1.61 3.24,-1.26 2.66,0.15 0.99,1.17 2.71,-0.08 0.94,0.01 2.82,0.35 0.35,-0.67 0.62,-0.92 -3.5,-0.59 0.22,-0.12 0.32,-0.21 2.15,-0.08 -2.6e-4,0.1 1.51,-0.39 -2.62,0.46 1.39,-0.27 -1.76,-0.42 2.04,0.25 1.61,-0.04 -0.16,-0.31 2.71,0.35 1.81,-0.58 -0.35,-0.54 2.71,0.37 1.38,-0.32 1.98,-0.15 1.57,-0.24 -2.6,-0.56 0.41,-0.38 1.75,-0.81 -3.03,-0.74 0.6,-0.68 1.57,-0.69 -2.52,-0.6 0.29,-0.59 1.17,-0.78 -0.56,-0.25 0.15,-1.5 -2.26,-0.93 -1.46,-0.34 0.46,-0.25 -0.23,-0.41 -4.35,-1.23 -1.13,-0.86 -0.38,-0.79 1.85,-0.54 0.49,-3.6e-4 1.37,-0.35 2.79,-0.62 0.66,-1.38 0.53,0.62 2.37,0.47 0.8,-0.03 2.98,1.02 -1.49,-1.48 1.15,-0.22 2.42,0.74 -1.98,-1.28 0.89,-0.09 -2.35,-0.7 3.25,-0.24 -0.72,-1.26 -1.15,-0.26 3.49,0.66 2.12,-0.6 -0.26,-0.82 -4.04,-0.72 -0.74,-1.02 3.71,0.7 -2.99,-2.72 1.28,-2.03 1.85,0.37 1.92,-1.01 1.14,-1.37 1.84,-0.42 3.25,-0.05 4.2,-0.36 2.05,0.4 -0.95,-1.43 1.26,-0.75 1.08,-0.91 -0.82,-1.61 1.16,-1.17 3.26,-0.42 -2.51,1.33 0.84,1.41 1.25,-0.64 2.63,0.03 4.93,-0.79 -1.93,-1.74 1.8,0.28 1.33,-0.61 2.18,-0.69 2.47,-0.97 3.61,-2.01 1.42,-0.93 1.89,-2.45 4.67,-2.36 0.28,-1.12 -0.69,-0.69 -0.63,-1.24 -1.23,-1.48 2.23,0.71 2.62,0.73 1.9,0.22 2.23,-0.23 2.32,-0.37 2.16,0.94 0.29,-1.07 2.9,-0.12 2.72,-0.54 5.43,-0.92 8.68,-1.16 2.84,-0.84 2.79,-1.58 4.89,-1.64 -1.03,-0.38 1.71,-0.69 1.87,-0.75 1.81,-0.33 -0.57,-0.64 1.71,-0.73 1.91,0.12 -0.57,-0.25 1.88,-0.31 2.99,-1.05 -4.32,-0.11 -5.76,-1.1 -2.97,-0.84 -6.86,1.37 -10.57,0.58 1.49,0.1 5.77,-0.51 5.57,-1.1 -1.94,-0.07 -6.85,0.28 -6.8,0.04 1.81,0.5 
2.94,-1.78 2.21,-1.44 2.37,0.44 -0.22,-1.23 2.17,0.04 2.18,0.75 8.25,-1.6 2.87,-1.53 -2.29,-0.11 -3.09,-0.46 -5.68,-0.24 1.53,0.18 4.4,-0.39 1.22,-0.92 -0.07,0.32 -3.32,-1.14 -0.41,-0.33 3.63,2.13 9.05,0.12 11.64,3.95 1.84,1.75 4.21,-0.84 4.06,0.14 2.19,1.31 2.66,-0.71 1.88,-0.67 1.32,-0.91 -2.71,-0.71 0.22,-1.01 -0.69,-0.56 0.6,-1.07 -1.71,-0.59 -1.71,0.08 2.65,-1.75 -0.43,-0.81 0.18,-0.31 -2.77,0.14 -0.58,-0.59 -0.57,-0.82 -7.23,-1.51 -7.57,-1.55 -3.69,-0.03 4.51,0.2 0.78,-1.14 -2.45,0.05 -4.43,0.71 -4.44,-0.01 -5.34,-0.35 6.43,0.45 3.84,-0.79 -2.4,-0.56 -9.52,0.6 -3.87,-0.21 1.44,-0.47 -4.17,-1.17 -0.64,-0.56 1.58,0.85 6.31,-0.26 5.6,-0.61 -2.53,-0.68 -1.86,-1.31 0.15,-0.07 2.26,-0.72 4.76,-0.11 7.26,-0.03 -1.4,0.19 -7.18,-0.69 -3.04,0.55 2.61,0.32 7.08,0.96 8.43,-1.28 -2.06,-0.86 -4.62,0.06 -4.37,0.11 -0.93,-0.88 -2.16,-0.27 -0.58,-0.98 -2.68,-0.48 2.73,-0.08 0.09,-1.04 2.95,-0.04 6.03,1.67 8.79,0.26 -1.23,-0.81 -5.75,-0.36 -5.19,-0.49 2.25,-1.81 -4.72,-0.52 -3.67,-1.25 1.91,0.75 6.46,-0.28 2.18,-0.79 -1.54,-0.13 -3.22,-0.44 -0.55,-0.2 -4.3,-1.32 3.43,0.42 4.74,0.88 3.79,-1.39 -2.77,-3.43 -4.94,-2.39 -2.01,0.21 8.54,-0.13 3.5,-0.84 -2.11,-0.34 -2.46,-0.06 -0.28,0.36 -2.01,-0.29 -4.7,-0.75 -5.12,-0.95 2.34,0.01 0.16,0.12 1.47,-0.13 -0.75,-0.08 -3.92,-0.39 -1.14,-0.95 1.86,1.32 4.87,-0.52 1.21,-0.1 1.84,0.08 2.89,-0.01 2.47,-0.29 2,-0.79 7.37,2.12 7.18,-0.63 -2.26,-0.27 -4.39,-0.53 -6.74,-0.6 3.04,-0.01 0.35,-0.22 -1.06,-0.35 1.75,0.29 2.76,-0.31 0.28,-0.35 1.9,-0.2 7.71,0.39 2.93,-0.57 -2.53,-1.39 -5.38,1.85 -5.7,0.15 2.25,-0.98 -0.23,-0.77 1.63,-1.07 -0.15,-0.9 1.72,-1.28 -0.04,-1.17 1.23,-0.87 4.18,-0.78 6.45,-1.87 -4.29,0.69 0.27,-2.19 2.55,-1.44 4.73,-1.19 -2.21,-0.94 -4,-0.69 -1.73,0.37 -1.09,1.09 -3.02,0.61 1.88,-2.36 6.92,-0.48 10.29,-1.24 2.63,-0.27 2.82,-1.16 0.12,-0.99 -3.84,0.35 -7.67,-0.47 -11.49,0.12 1.35,-0.64 6.81,0.21 9.63,-0.74 2.17,0.36 7.34,0.16 7.65,-0.63 0.66,-0.8 5.84,-0.44 7.96,-1.82 -5.09,-0.65 -10.21,-1.31 -15.34,-0.63 0.5,1.64 -5.67,0.8 
-7.6,0.93 1.55,-1.01 -3.99,0.56 -5.15,0.88 -2.91,0.58 -9.45,3.25 -4.08,0.7 2.06,-0.33 6.93,-3.18 2,-3.26 -3.57,0.24 -6.75,1.75 -10.45,1.85 -1.74,0.31 -2.71,0.34 -2.34,0.02 -0.92,-0.89 8.17,-0.04 4.82,-1.7 -6.82,-0.18 -13.65,0.3 -20.43,1.09 1.29,-1.99 7.63,-0.03 7.69,-1.44 -1.17,-0.48 -5.4,0.08 -1.92,-0.16 5.02,0.28 10.05,0.12 15.07,0.12 3.88,-0.52 7.9,0.06 11.65,-1.2 -3.81,-1.07 -6.85,-0.48 -9.93,-0.81 -1.28,0.3 -3.97,0.5 -1.1,0.11 3.16,-0.86 -2.93,-1.11 -4.29,-0.71 -4.73,0.17 -9.47,-0.2 -14.19,0.31 -2.97,0.04 -8.63,0.48 -9.42,0.46 -0.02,-0.8 7.39,-0.26 7.08,-0.72 -3.68,-0.42 3.37,0.34 4.75,-0.09 4.97,-0.37 9.98,0.22 14.94,-0.42 -2.84,-1.02 -6.66,-0.36 -8.36,-0.47 0.57,-0.19 -5.13,-0.42 -7.56,-0.34 -2.96,-0.3 -6.86,0.05 -7.72,0.04 -1.9,0.07 -6.83,-0.34 -6.75,0.27 0.56,0.48 -3.85,-0.53 -5.26,0.09 -2.91,0.78 7.57,0.72 2.53,0.92 -1.77,-0.1 -4.34,0.14 -1.2,0.16 5.43,0.03 -6.63,-0.4 -1.37,0.55 -2.92,-0.43 -7.67,-0.49 -9.54,-1.07 -2.13,-0.51 -4.02,-0.02 -3.22,0.06 -3.05,-0.15 -1.17,0.15 0.76,0.27 -1.87,-0.12 -7.22,-0.27 -6.87,-0 1.18,0.23 5.1,0.61 1.77,0.37 -2.62,-0.36 -6.01,-0.3 -4.16,0.1 -4.03,-0.01 4.56,0.54 3.64,0.07 4.58,0.27 9.38,-0.28 13.81,0.87 1.25,1.43 -6.53,-2.03 -4.74,0.28 -2.72,-1.55 -9.86,-0.82 -10.47,-0.51 3.59,0.54 4.96,1.12 8.47,1.27 -2.19,0.13 -6.47,-0.76 -6.46,0.45 -1,0.4 2.84,0.84 -0.35,0.81 -5.07,-0.68 -10.07,-1.94 -15.2,-2.2 -4.51,-0.43 0.15,1.93 2.23,1.74 -1.98,0.59 -6.22,-0.42 -2.07,0.51 2.77,0.96 -4.34,-0.76 -6.01,-0.63 -1.58,-0.79 -4.66,3.04 -3.5,-0.27 -4.27,-1.74 -7.34,0.23 -12.12,-0.21 -1.77,0.05 -5.97,0.4 -1.97,0.77 1.62,0.83 8.52,1.78 3.67,1.17 -3.45,-0.49 -6.83,-1.99 -10.36,-1.1 -1.06,1.27 2.02,0.82 -0.23,1.66 1.43,1.67 -5.11,-0.83 -5.7,0.73 3.34,1.97 -2.2,-1.42 -3.72,0.26 -2.24,1.25 -6.51,0.43 -7.63,2.45 2.8,0.82 8.22,0.08 9.32,0.11 -2.38,-0.55 -2.49,0.99 -2.92,2.13 -3.75,1.08 -7.7,0.47 -11.5,1.22 0.33,0.33 -4.54,0.64 -6.12,0.92 -3.19,-0.23 -6.62,1.43 -2.07,1.86 2.43,0.3 2.68,0.56 5.74,0.22 -4.72,0.85 3.75,0.18 -0.18,0.7 -0.47,0.48 
3.6,0.41 4.41,-0.03 1.72,1.03 6.5,-1.24 6.2,0.47 -2.28,-0.14 1.31,0.89 -1.32,0.41 -2.01,-0.73 -9.17,0.17 -3.66,0.3 1.66,-0.01 7.67,0.03 2.86,0.1 -3.86,0.07 -7.78,-0.5 -11.6,0.14 -0.82,0.87 2.96,0.21 0.63,0.57 0.94,1.34 5.75,-1.79 2.71,0.38 z m 132.2,12.52 c -1.59,-0.6 3.65,0.15 -0.05,-0.84 -1.37,-1.04 -7.96,-0.86 -3.67,0.54 0.65,0.18 6.83,1.56 3.72,0.3 z m -8.88,3.59 c -1.57,0.39 -6.63,0.59 -6.22,1.84 1.86,-0.25 9.2,0.01 6.19,-1.58 z m 19.57,-14.37 c -0.09,-3.96 -2.53,0.26 0,0 z m 1.99,2.22 c -1.38,-0.31 -4.15,1.28 -1.17,1.05 2.3,0.21 4.03,-0.09 0.81,-0.51 l 0.75,-0.25 z m -5.73,1.9 c 1.97,-1.23 -4.11,-0.66 -0.42,0.09 z m -2.69,0.78 c -1.92,-0.24 -3.67,0.98 -0.63,0.98 2.3,0.11 4.16,-0.43 0.63,-0.98 z m -2.67,4.26 c -1.66,-0.74 -7.55,-0.67 -6.21,-0.02 2.38,-0.41 6.14,1.58 6.21,0.02 z m 7.04,-16.54 c 0.81,-0.83 -2.47,0.39 0.48,0.07 z M 340.48,3.07 c 2.14,0.65 6.97,1.81 6.72,0.61 1.52,-0.55 -3.67,-1.42 -5.09,-1.18 -0.89,0.06 -4.14,0.09 -1.63,0.57 z M 330.4,38.95 c -2.3,0.49 0.91,1.63 0.43,0.12 z m -10.12,-0.22 c 2.72,0.25 -2.42,0.09 0.62,0.9 -3.41,-0.35 2.03,0.77 2.01,0.48 2.87,-0.13 -3.74,0.24 -0.05,0.7 1.7,0.29 7.63,-1.19 3.07,-1.89 -0.96,-0.54 -6.82,-2.06 -5.64,-0.19 z",NE:"m 472.53,195.05 c -0.75,-2.18 2.68,-1.53 4.08,-2.47 2.43,-0.56 5.27,0.81 6.33,-1.99 2.35,-2.68 -1.72,-9.21 3.14,-9.31 3.87,-0.56 5.85,-4.74 9.41,-6.21 3.12,-1.84 6.25,-3.67 9.42,-5.39 3.3,-0.57 6.46,3.04 8.67,1.73 -0.4,3.12 2.65,5.56 2.09,8.04 -1.29,3.47 0.67,8.09 -2.66,10.59 -1.98,1.89 -4.03,5.09 -3.75,6.99 -2.89,3.51 -7.48,-0.93 -10.4,2.34 -2.67,0.41 -5.33,-2.05 -7.76,-0.39 -1.36,-2.74 -6.89,-3.28 -7.81,-0.17 -1.64,1.53 -0.88,5.55 -3.1,2.2 -1.66,-0.94 -1.67,2.51 -2.44,-0.17 1.27,-1.19 -4.63,-1.04 -2.67,-2.69 0.23,-0.28 -2.57,-1.65 -2.55,-3.1 z",DK:"m 495.21,76.19 c -0.44,-2.15 3.9,-0.81 4.89,-2.71 2.49,-0.81 0.08,2.94 -1.9,1.63 -1.84,-0.67 -2.84,1.45 -2.99,1.08 z m 7.13,5.32 c 2.8,1.22 2.68,-0.62 0.13,-0.32 l 0.22,0.24 z m 0.3,-1.57 c 2.3,0.37 1.55,1.39 2.62,1.82 -0.59,-1.14 1.46,-1.07 
0.72,-2.45 2.45,-2.14 -2.03,-1 -0.8,-0.55 -0.37,-0.06 -0.84,0.36 -1.21,-0.54 -0.05,0.27 -2.79,0.52 -1,1.57 l -0.08,0.11 z m -3.83,-0.45 c 0.48,2.01 4.2,1.24 2.49,-0.3 -0.23,0.82 -1.44,-0.23 -2.49,0.3 z m -4.36,-3.04 c 0.99,0.09 3.68,-0.25 2.92,-0.28 0.65,-2.73 4.5,0.4 2.46,-7.2e-4 0.21,0.8 1.86,-0.2 2.24,1.05 -1.31,0.45 -1.95,0.52 -2.47,1.41 0.41,0.8 -2.47,-0.01 -0.44,0.54 -1.25,0.79 -0.42,2.08 -0.54,2.11 -3.23,0.8 -2.36,-1.44 -3.91,-1.8 -0.76,-1.62 1.08,-0.89 -0.39,-1.78 l -0.02,-0.43 z",LV:"m 530.16,76.64 c 0.3,-2.33 4.87,-5.24 7.05,-1.36 3.35,0.59 1.1,-3.97 4.66,-2.94 1.99,1.68 6.31,0.33 7.07,2.64 -0.07,0.92 2.46,2.76 -0.51,3.69 -2.9,0.81 -5.39,-1.02 -8.03,-1.68 -3.25,0.69 -7.16,-0.89 -10.12,0.76 -0.3,-0.29 -0.22,-0.77 -0.13,-1.11 z",RO:"m 528.08,105.9 c 3.05,0.28 3.81,-3.14 5.89,-4.6 2.82,-2 6.18,0.64 9.05,-0.62 2.26,-0.8 4.32,-0.99 5.44,1.71 3.09,1.12 -0.25,6.94 3.88,5.75 3.2,-1.84 1.01,3.23 0.12,1.24 -0.87,-0.78 -0.25,1.39 -0.65,0.71 0.2,0.38 -0.75,3.91 -2.79,1.87 -3.58,-1.5 -6.51,1.86 -10.13,0.74 -2.41,-0.32 -4.26,-0.29 -4.36,-2.31 0.71,-0.32 -4.34,0.19 -3.1,-1.71 -2.07,-0.49 -1.75,-1.75 -3.35,-2.78 z",ZM:"m 532.92,281.12 c 0.13,-2.93 -0.39,-6.24 0.25,-8.95 3.23,0.4 6.48,0.56 5.29,-3.6 -0.37,-3.2 0.74,-1.97 1.41,-0.82 2.72,-1.79 2.32,2.13 5.56,1.48 1.49,-0.27 2.14,-1.59 2.94,0.77 2.67,-0.14 3.74,4.3 5.71,3.22 1.04,0.13 0.76,-5.12 -0.93,-2.58 -4.74,-1.4 -0.18,-6.33 -2.57,-8.87 1.47,-3.13 5.81,-3.76 8.61,-1.69 1.4,1.36 4.69,1.44 5.11,3.68 2.05,1.86 -1.93,4.4 0.56,6.69 -1.68,0.43 -3.04,4.51 -1.09,4.43 -0.87,2.05 -9.5,0.98 -7.63,4.68 -3.81,-0.6 -4.39,3.14 -7.48,4.27 -1.36,3.74 -5.32,2.41 -7.91,1.2 -3.67,0.48 -5.88,-0.51 -7.86,-3.9 z",IR:"m 594.01,124.87 c 2.29,-2.79 4.55,3.52 7.72,0.9 1.39,-2.34 5.8,-1.64 3.41,0.44 2.62,1.02 2.25,4.53 5.71,4.22 2.37,2.95 6.36,2.25 9.56,1.4 2.1,-0.3 -0.77,0.55 1.31,0.14 -0.81,-2.08 2.61,-1.48 3.62,-3.28 2.96,-0.75 5.14,-0.76 7.93,0.75 3.37,-0.31 4.9,3.68 8.23,3.11 0.61,2.16 -0.29,4.89 -0.85,6.38 -2.57,1.29 1.27,2.5 -0.69,3.99 
0.86,2.3 0.22,4.85 3.38,4.77 -0.06,2.59 -4.49,3.82 -0.92,6.16 1.33,2 4.63,2.02 3.94,5.11 3.5,1.76 -3.36,1.63 -3.11,3.85 -0.01,3.99 -3.48,0.77 -5.33,1.49 -2.6,-0.63 -5.52,-0.09 -7.42,-1.84 0.25,-4.75 -4.26,-2.89 -6.48,-1.43 -3.79,0.01 -5.73,-3.2 -9.13,-3.81 -1.53,-1.87 -1.89,-3.1 -2.65,-4.49 -1.02,-2.53 -2.21,-1.34 -3.9,-2.11 -1.26,-0.45 0.73,-1 -0.75,-0.49 -0.64,3.09 -2.51,-1.04 -3.15,-1.73 0.77,-2.94 -2.24,-4.73 -4.63,-5.73 -0.52,-1.35 -2.98,-3.72 -1.24,-4.84 1.63,-0.71 0.92,-2.55 1.48,-2.98 -2.84,0.39 -3.19,-2.43 -4.25,-4.34 -1.58,-1.89 -0.35,-3.31 -1.81,-5.64 z m 32.76,35.03 c -3.1,1.47 -0.07,0.94 1.27,-0.07 -0.42,-0.38 -0.83,0.25 -1.27,0.07 z",MM:"m 727.56,176.02 c 0.68,-1.15 1.81,0.25 1.16,-2.16 1.11,-0.04 2.07,-0.87 1.49,-3.05 1.46,-0.74 -0.37,-4.26 2.76,-2.25 1.11,-2.81 2.28,-4.89 2.84,-7.7 1.65,-1.14 4.22,-2.64 5.53,-1.71 -1.55,-1.04 1.2,-3.42 1.57,-3.51 1.76,1 1.47,2.48 2.85,3.12 0.44,2.99 -0.31,4.89 -2.55,5.84 -0.63,1.41 -1.49,5.21 1.31,3.17 2.75,-1.17 0.15,3.21 3.26,3.06 -0.29,1.23 -1.55,3.36 1.37,2.79 -0.36,2.73 2.63,0.88 3.27,1.29 -1.48,1.93 -2.69,3.05 -4.53,3.93 -1.35,0.72 -2.76,1.49 -4.36,1.72 -0.34,1.57 -0.81,3.14 -1.38,3.4 1.11,1.68 3.34,5.13 4.09,5.85 -1.37,2.06 -3.07,4.4 0.17,6.29 0.35,2.99 3.27,6.53 0.02,9.02 -0.88,1.05 -1.51,3.79 -1.45,0.58 1.47,-2.49 0.58,-2.87 0.55,-4.18 0.45,-0.21 -0.96,-5.69 -1.22,-4.71 -0.39,1.23 -0.56,-3.14 -1.2,-3.34 -0.63,-2.51 0.31,-4.23 -1.54,-5.18 -0.73,-2.35 -0.82,-1.04 -1.53,0.65 -1.53,0.73 -1.12,-1.58 -1.29,0.67 -1.3,0.4 -2.69,2.31 -2.68,1.26 0.14,0.34 -0.59,0.2 -0.29,-0.59 -1.4,2.94 0.08,-2.26 -1.25,0.8 0.28,-1.45 -0.56,0.59 0.12,-1.92 -1.99,3.38 -1.02,-1.22 -0.38,-2.76 -0.76,-2.36 -1.51,-6.26 -2.23,-4.05 -2.15,-1.88 1.58,-0.27 0.06,-2.15 -0.67,-0.85 -2.15,-1.08 -1.94,-0.82 -0.43,0.34 0.3,-2.48 -0.88,-0.57 0.26,-1.72 -0.85,0.45 -1.44,-2.03 z",ET:"m 563.4,213.45 c 0.41,-2.63 4.67,-0.69 3.05,-4.4 0.31,-2.14 0.94,-4.2 2.39,-4.16 -0.44,-3.5 3.54,-4.53 3.9,-7.41 -0.38,-2.92 3.11,-2.05 3.6,-2.06 0.25,-3.07 2.69,0.6 
4.13,-0.79 3.42,-0.22 6.63,2.85 8.8,5.71 -1.22,1.87 -2.62,5.61 1.23,4.23 -0.31,2.08 2.43,6.23 5.78,6.5 2.36,1.18 7.39,2.1 8.26,2.46 -2.82,2.52 -5.32,5.37 -7.89,8.12 -3.82,-0.82 -6.13,2.51 -9.57,2.78 -1.89,-0.93 -4.24,-0.87 -5.48,1.61 -3.65,0.27 -5.98,-3.07 -9.52,-2.92 -1.46,-0.87 -0.49,-3.09 -2.49,-2.94 -1.11,-2.91 -3.05,-5.97 -6.2,-6.73 z",GT:"m 216.08,194.7 c 0.19,-3 1.54,-5.27 4.97,-4.34 0.45,-2.13 -4.34,-2.79 -1.53,-3.79 -0.1,-2.22 3.45,-0.63 5,-1.07 -0.3,2.68 -0.01,6.85 2.21,5.6 0.96,0.96 -2.99,1.76 -2.22,3.57 -1.96,2.49 -4.96,2.64 -7.74,0.59 l -0.35,-0.28 z",SR:"m 311.83,225.6 c -1.86,-2.17 -0.51,-4.3 1.36,-4.72 -1.09,-3.3 3.77,-1.06 3.81,-1.81 1.55,-0.63 2.77,0.68 2.53,-0.34 5.17,-0.24 -0.95,4.68 2.55,6.9 -0.55,1.82 -0.99,4.41 -3.31,2.84 -1.51,0.27 -2.85,0.17 -2.08,1.85 -3.33,0.41 -2.96,-3.89 -4.86,-4.72 z",EH:"m 424.61,177.22 c -0.57,-3.33 2.36,-5.36 3.44,-8.22 -0.84,-0.49 3.7,-3.2 3.23,-5.89 1.56,-1.8 3.26,-3.74 4.73,-5.39 3.91,0.25 7.93,-0.07 11.78,0.22 0.81,3.33 0.01,5.55 -3.6,4.57 -1.85,0.06 -3.74,-0.12 -5.56,0.07 -0.71,2.52 1.86,7.31 -1.96,7.8 -2.24,1.66 0.93,6.76 -3.79,5.24 -2.65,0.12 -5.38,-0.24 -7.97,0.18 -0.34,0.39 -0.36,0.95 -0.29,1.43 z",CZ:"m 505.43,94.08 c 1.9,0.8 5.69,-2.65 7.09,-1.6 1.57,-0.48 3.15,0.71 4.83,0.81 -1.65,0.84 2.5,2.19 1.44,0.45 1.85,0.84 4.05,0.97 5.27,2.63 -2.52,1.33 -4.66,2.74 -7.41,2.08 -3.07,-1.86 -4.49,2.17 -7.59,-0.68 -2.06,-0.99 -2.46,-2.18 -3.63,-3.69 z",TD:"m 509.24,195.08 c 1.48,-2.82 4.26,-5.02 5.68,-7.87 -0.28,-3.18 1.06,-6.8 0.67,-9.54 -1.33,-2.22 -3.92,-7.42 0.52,-7.93 3.16,1.08 6.13,3.02 9.28,4.31 4.31,2.16 8.65,4.26 12.85,6.62 0.46,2.85 0.05,5.99 0.22,8.96 0.87,3.04 -3.56,0.7 -3.31,3.81 -1.45,2.08 -1.68,4.21 -2.39,6.6 2.59,-0.38 0.77,2.95 2.83,3.93 -0.68,1.21 -3.59,1.09 -4.14,3.15 -1.42,2.85 -4.89,3.04 -7.1,3.71 1.33,2.52 -5.01,2.76 -6.23,3.16 -0.82,-0.61 -3.7,1.92 -3.59,-1.07 -0.59,-1.8 -6.5,-5.42 -1.71,-5.29 2.76,0.24 2.38,-0.04 0.94,-2.07 -0.03,-2.51 0.04,-5.91 -2.67,-6.71 -1.21,-0.75 -1.72,-2.4 -1.86,-3.75 
z",AL:"m 525.44,121.31 c 1.54,-1.75 -0.99,-4.9 1.34,-5.31 2.94,-0.14 1.39,3.01 2.7,4.49 1.52,0.75 -1.08,3.52 -2.05,3.53 -1.17,-2.05 -1.54,-1.21 -1.99,-2.71 z",FI:"m 529.12,41.38 c 2.56,-2.1 5.75,2.56 9.33,0.6 2.86,1.35 4.92,-0.31 6.08,-2.55 2.54,-1.03 6.96,-1.07 8.41,0.84 -1.36,0.95 -0.21,1.5 -2.17,1.55 1.62,-0.03 -0.92,2.36 2.03,2.15 3.51,0.77 1.39,2.16 -0.08,3.59 1.4,1.31 3.55,3.09 1.83,3.8 -0.44,1.18 -0.66,1.88 1.11,2.32 -1.82,0.82 3.3,1.44 0.04,2.38 -1.35,0.88 6.42,1.82 2.5,3.91 -3.11,2.05 -6.4,3.94 -9.71,5.53 -2.85,-0.3 -4.48,0.39 -6.26,0.69 -2.51,0.7 -6.01,0.85 -6.38,1.06 1.01,-0.54 -0.75,-1.1 -0.29,-1.29 -1.71,-0.09 -5.49,-0.39 -3.9,-2.99 1.08,-0.93 -2.85,-4.07 0.38,-4.7 -0.66,-0.96 2.72,-0.7 2.07,-1.3 2.69,-0.72 4.69,-3.33 7.68,-3.76 1.73,-0.72 -1.1,-2.71 -3.01,-2.98 -2.52,-0.88 0.57,-2.67 -1.37,-3.73 0.8,-0.59 -1.01,-1.45 -0.05,-2.2 -2.56,-1.37 -5.52,-1.7 -8.24,-2.91 z m -2.08,25.16 c 0.52,-0.52 1.92,-0.26 -0.15,-0.81 1,1.23 -1.72,-0.5 0.15,0.81 z",SY:"m 570.71,142.04 c 1.24,-0.86 4.03,-3.39 1.3,-3.78 -0.74,-1.31 -1.53,-4.12 0.8,-3.94 0.49,-0.93 0.77,-0.66 0.69,-2.15 3,-0.13 6.21,0.39 9.34,-0.18 1.85,-0.58 5.4,-1.41 6.57,-0.64 -1.6,1.39 -3.85,1.65 -2.88,4.25 0.05,4.11 -4.64,4.87 -7.54,6.57 -2.31,1.62 -7.03,3.78 -8.2,0.71 l 0.05,-0.54 z",KG:"m 664.63,122.89 c 2.3,-0.76 2.91,1.05 4.92,-0.68 2.16,0.88 2.87,-0.65 4.86,-1.21 -0.49,-0.36 -3.37,-1.38 -3.76,-1.96 -0.9,1.93 -6.12,-0.69 -2.2,-1.54 2.08,-0.21 -1,-1.68 1.71,-2.16 2.49,-1.24 5.94,2.82 6.18,-0.91 3.03,-0.69 5.82,1.61 9.09,0.29 3.21,0.13 6.71,0.36 9.06,2.49 -3.42,1.01 -6.11,3.32 -9.58,3.35 -1.69,3.44 -4.52,-0.76 -6.55,2.11 -2.02,0.38 -1.34,2.99 -4.23,2.23 -2.52,0.68 -3.84,-0.77 -6.29,-0.4 -0.74,-0.83 -5.5,1.09 -3.22,-1.61 z",SB:"m 920.68,265.88 c 4.3,0.98 -3.81,-3.21 -0.58,-0.43 z m -2.7,-4.52 c 2.63,3.35 0.38,-2.51 -0.68,-2.14 0.29,0.7 0.26,1.5 0.68,2.14 z m -2.42,1.9 c 3.89,0.96 1,-1.56 -0.98,-1.53 -0.68,0.61 0.43,1.39 0.98,1.53 z m -2.42,-4.8 c 3.82,2.44 0.25,-1.32 -1.78,-1.66 0.4,0.71 0.97,1.41 
1.78,1.66 z m -3.66,1.29 c 1.31,-1.32 -2.71,-1.95 -1.05,-0.8 0.66,-0.18 0.38,0.8 1.05,0.8 z m -2.61,-4.1 c 5.19,3.39 -3.75,-4.46 0,0 z",OM:"m 616.21,182.18 c 4.03,-1.59 9.69,-1.6 9.78,-7.19 0.72,-2.21 -2.22,-3.76 -0.06,-6.07 -0.12,-1.28 2.14,-0.6 0.67,-2.41 1.15,-3.21 3.41,3.36 6.79,2.52 1.82,0.51 4.03,2.77 4.29,4.28 -1.6,1.93 -3.06,5.18 -5.09,4.88 -1.36,1.73 0.91,4.9 -2.5,4.41 -1.15,1.87 -2.49,2.84 -4.65,3.28 -0.26,2.97 -4.49,1.94 -6.41,2.5 -0.82,-2.08 -1.92,-3.93 -2.82,-6.21 z m 11.24,-19.87 c 1.9,-2.52 0.3,3.26 0,0 z",PA:"m 241.78,212.35 c 1.11,-1.13 -0.69,-4.84 1.73,-3.09 -0.08,1.32 1.78,1.39 1.57,0.83 3.02,2.42 6.44,-3.33 9.12,-0.92 2.98,0.48 4.49,4.97 2.09,5.28 -1.41,2.51 -2.85,-4.05 -0.02,-1.68 -1.8,-1.71 -1.46,-0.33 -3.65,-2.48 -1.61,-0.09 -3.23,1.89 -3.78,2.6 3.24,1.84 -2.42,3.88 -1.76,0.51 -0.87,1.82 -2.54,-1.89 -4.53,-1.17 -0.78,0.1 -0.34,1.43 -0.77,0.11 z", AR:"m 281.51,383.73 c 2.4,1.32 -0.98,1.37 1.51,2.11 1.77,2.42 5.58,3.4 8.14,3.74 -2.66,1.38 -6.38,0.26 -9.44,0.18 -0.35,-1.88 -0.25,-4.06 -0.2,-6.04 z m -3.74,-65.8 c 1.13,-2.35 2.71,-5.43 4.55,-6.82 -0.44,-2.38 -1.63,-6.25 1.48,-7.3 3.3,-1.08 0.72,-5.21 4.14,-6.15 0.57,-1.26 2.94,0.69 4.48,0.41 1.12,2.15 1.13,1.78 2.01,-0.28 3.81,-1.96 5.04,2.92 7.84,4.31 3.17,1.67 7.5,2.66 9.95,5.27 -0.97,1.96 -5.1,5.98 -0.45,5.17 2.5,0.64 4.03,0.33 5.96,-0.2 1.34,-1.24 3.14,-2.44 2.81,-4.7 3.6,-1.07 3.05,5.99 -0.72,5.55 -2.56,1.8 -4.01,3.69 -6.41,6.17 -1.84,1.24 -2.25,3.28 -2.35,5.19 -0.73,2.27 -0.81,4.15 -1.4,6.21 -1.45,2.99 5.21,2.63 3.15,5.81 1.23,1.37 3.08,2.52 0.53,4.44 -1.81,3.61 -6.24,3.53 -9.77,4.18 -1.73,0.82 -6.12,-1.55 -3.98,1.19 0.09,0.85 -1.25,2.97 -0.48,3.95 -2.34,2.46 -6.17,-0.27 -7.67,0.15 -0.93,2.03 0.88,5.54 2.72,4.15 -1.82,-0.73 2.92,-0.79 1.15,1.34 -1.12,0.98 -3.33,-2.2 -3.28,0.26 3.11,0.18 -2.79,1.81 -1.17,4.26 -1.06,1.43 -1.16,1.79 -3.41,2.02 -3.12,0.46 -4.22,4.97 -0.11,5.5 2.5,-1.09 2.92,2.55 0.42,2.24 4.03,0.07 -3.58,2.29 -3.98,4.27 1.91,-0.03 -1.58,3.8 -2.46,0.96 1.37,0.97 -2.11,0.74 
0.24,0.81 1.78,0.32 -4,2.73 -1.29,3.19 1.02,1.96 -3.25,1.19 0.2,1.42 1.71,1.25 2.07,2.82 -0.53,1.43 -2.48,-0.71 -5.1,-0.27 -7.6,-0.49 -2.44,-0.66 0.7,-5.17 -3.05,-3.47 -1.46,-1.34 -1.9,-4.01 0.28,-5.03 2.81,-0.82 0.2,-3.91 3,-4.94 -0.54,-1.08 1.09,-2.38 0.32,-3.13 -0.48,-1.25 2.67,-2.7 -0.09,-3.26 -2.17,-0.4 4.2,-0.77 0.96,-1.47 -1.77,-0.08 -0.35,-2.36 -1.11,-3.09 -0.73,-0.48 -1.48,-3.09 0.16,-3.65 -0.77,-2.43 -0.04,-5.42 0.85,-6.82 -0.11,-2.32 2.89,-2.81 0.79,-5.23 -1.04,-3.28 3.32,-4.38 2.05,-7.13 0.95,-1.8 2.76,-4.49 0.99,-5.93 0.39,-2.26 -2.32,-3.83 -0.66,-6.04 0.88,-1.84 1.61,-2.36 0.95,-4.74 z",GB:"m 468.18,66.85 c 0.73,0.5 0.74,-2.55 -0.62,-1.46 1.42,0.81 -1.37,0.46 0.75,1.28 z m -13.62,10.44 c 2.34,0.35 1.14,-1.51 -0.23,-0.73 0.93,0.37 0.65,0.52 0.23,0.73 z m 2.74,17.69 c 1.94,-1.8 4.37,-0.14 5.49,-1.81 2.97,0.29 4,-0.48 6.51,-0.56 1.81,0.42 5.84,0.27 6.27,-1.37 -2.46,-0.63 -2.69,0.17 -1.35,-1.05 -0.28,-0.25 1.95,-0.5 0.86,-0.91 4.04,-1.89 -1.41,-3.09 -3.21,-2.42 3.28,-1.88 -4.93,-2.87 -0.12,-2.21 0.67,-0.33 -1.42,-2.82 -3.46,-2.88 -0.53,-3.86 -4.23,-3.68 -7.05,-4 1.74,0.94 4.67,-1.34 1.43,-0.7 2.63,0.19 6.43,-5.05 1.4,-3.72 -2.31,-0.11 -5.91,0.89 -2.97,-0.38 -4.23,0.6 5.56,-2.36 0.85,-2.28 -2.58,0.5 -4.16,-0.23 -4.71,1.2 0.8,1.49 -2.11,0.36 -0.97,1.89 -2.22,0.06 1.76,0.51 -0.25,0.7 1.35,0.33 -0.33,0.5 -0.4,1.51 -2.78,-0.01 2.14,0.13 -0.35,0.25 1.87,0.7 3.13,-1.11 1.67,0.55 1.08,-0.88 -1.51,1.53 -0.73,1.44 0.63,0.34 -0.92,2.98 0.56,0.87 0.23,-1.39 1.08,-2.15 0.48,-0.62 0.95,0.66 0.6,-1.55 1.98,-0.18 -1.65,-0.35 0.4,2.76 -1.47,2.71 -0.18,1.03 2.74,0.56 4.25,-0.04 3.47,-0.7 -2.01,1.39 0.97,2.08 0.38,0.57 1.55,-0.34 0.51,1.4 -0.38,0.59 1.65,1.62 -0.37,1 0.46,0.91 -6.04,-0.4 -2.91,0.49 -2.13,1.49 -0.99,0.66 0.22,1.2 2.05,1.41 -5.17,2.73 -1.98,2.81 -1.74,0.99 2.93,-0.46 1.97,0.67 1.61,0.12 3.23,0.73 4.81,-0.64 -1.47,2.76 -5.23,0.39 -6.34,3.11 -0.58,0.81 -4.7,1.75 -1.57,1.9 z m -2.12,-19.78 c 1.63,-0.56 -1.14,-2.33 -2,-1.23 0.05,0.77 2.63,0.37 2,1.23 z m 
-1.93,-2.72 c 2.2,-0.45 0.89,-1.79 -0.58,-0.66 -1.07,0.55 0.17,0.89 -0.16,1.32 0.35,-0.04 0.65,-0.32 0.74,-0.66 z m -3.97,10.04 c 0.86,-0.99 5.51,-3.9 6.54,-0.8 0.26,-0.02 1.21,1 0.38,0.7 0.14,1.77 -3.34,0.88 -4.31,0.42 -0.25,1.29 -2.03,-0.1 -2.62,-0.32 z",CR:"m 233.81,206.48 c 0.33,-1.46 0.15,-3.05 2.63,-1.75 1.26,-0.41 3.08,0.93 3.6,0.56 0.32,2 4.06,3.34 1.96,3.95 0.36,1.38 0.15,5.52 -0.99,2.05 -0.87,-0.61 0.42,1.87 -1.24,0.14 0.64,-2.43 -3.28,-2.77 -4.36,-4.72 2.83,2.83 -1.34,1.74 -1.6,-0.24 z",PY:"m 298.18,298.18 c 1.02,-3.67 1.45,-8.62 6.57,-8.05 3.56,-1.4 6.47,1.43 6.83,4.47 -1.78,2.94 1.27,4.86 3.71,3.31 3.62,0.26 1.32,7.01 5.7,4.96 1.04,1.71 -1.13,4.37 -0.59,6.64 -0.96,2.02 -3.48,3.27 -5.13,3.5 -1.63,-0.54 -5.65,0.06 -5.82,-1.27 1.08,-2.1 4.76,-5.63 0.07,-6.27 -2.98,-1.78 -6.39,-2.41 -9,-4.65 -0.75,-0.97 -1.33,-1.92 -2.34,-2.65 z",GN:"m 430.29,204.77 c 0.33,-2.42 4.58,-1.46 3.32,-3.67 1.08,-2.79 4,1.15 6.35,-0.44 0.05,1.12 0.93,1.05 1.95,0.85 0.99,0.45 3.86,0.23 4.5,-1.01 0.56,1.49 3.03,3.02 1.43,4.31 1.61,-0.95 0.9,2.02 1.89,2.32 -0.98,1.79 0.93,2.72 0.52,3.91 1.83,1.69 -2.98,-0.02 -0.4,2.04 -0.69,1.73 -2.13,0.55 -3.22,2.32 -1.39,-0.69 -0.34,-5.01 -3.64,-3.51 -1.2,1.08 0.1,-2.46 -1.36,-3.31 -2.24,-3.04 -4.72,0.61 -6.61,1.65 0.51,-0.33 -1.42,-2.45 -1.34,-2.35 -0.73,-0.99 -3.15,-1.08 -2.02,-2.82 -1.2,1.34 0.12,-1.08 -1.37,-0.28 z",IE:"m 444.11,90.46 c -0.13,-0.4 1.83,-0.99 -0.44,-0.38 -1.79,-0.58 2.78,-1.53 -0.36,-1.09 -1.39,-0.37 2.35,-0.25 1.36,-0.92 1.45,-0.45 4.34,-1.04 1.19,-0.5 -3.64,0.91 3.84,-2.69 -0.27,-1.7 -2.94,-0.59 -0.76,-1.01 -0.54,-1.8 -1.56,-0.16 -0.99,-2.2 1.47,-0.74 1.83,-0.48 3.13,-1.09 1.36,-1.72 0.6,-0.78 2.86,-1.81 3.28,-0.91 -0.2,-1.24 2.32,-0.5 0.16,0.34 -1.91,0.64 -2.14,2.7 0.58,1.99 0.61,-0.54 3.44,0.81 2.42,1.05 1.41,1.91 0.22,5.57 -1.81,4.63 -1.85,0.81 -3.68,0.65 -4.27,1.59 -2.38,0.59 -3.98,0.52 -3,-0.14 l -0.42,0.04 z",NG:"m 479.33,213.43 c -0.74,-3.13 2.48,-4.29 2.68,-6.99 0.86,-1.84 -1.53,-5.34 1.23,-7.33 0.72,-3.9 6.68,-2.57 
8.22,-0.04 3.1,-2.33 6.54,2.67 9.38,-0.98 3.15,-0.3 5.91,1.56 8.43,-1.04 2.01,1.12 1.79,3.72 3.2,4.78 0.16,2.42 -3.57,2.63 -3.59,5.45 -0.53,2.4 -2.48,3.65 -3.28,6.1 -0.85,1.35 -2.04,6.12 -3.53,2.87 -1.55,-1.18 -3.97,1.25 -5.54,3.03 0.46,1.96 -1.51,2.94 -1.66,2.96 -1.22,1.07 -3.43,0.25 -3.54,0.36 -0.26,-0.19 -0.24,1.57 -0.61,-0.46 0,2.25 -0.2,0.07 -0.89,1.32 -0.71,0.74 -3.87,-1.61 -2.56,-2.93 0.19,-0.74 -1.69,0.23 -0.33,-0.75 -1.03,0.52 -0.43,-1.14 -1.01,-0.35 0.07,-2.63 -6.4,-1.2 -3.46,-2.39 -1.78,0.87 -3.75,1.15 -2.82,-1.83 0.04,-0.6 0.02,-1.25 -0.32,-1.77 z",TN:"m 492.73,139.84 c 2.59,-1.31 2.36,-3.83 2.33,-6.21 -0.86,-2.13 4.15,-3.27 4.79,-2.72 0.36,2.04 1.6,0.55 2.76,0.85 -1.84,1.44 -2.03,2.98 -0.07,4.34 -0.98,2.24 -4.61,4.64 -0.5,5.19 0.95,-0.06 2.98,2.36 1.05,3.54 -3.75,0.6 -1.75,4.67 -4.77,5.76 -0.31,-3.76 -2.59,-6.57 -5.09,-8.89 -0.09,-0.65 -0.7,-1.13 -0.51,-1.85 z",PL:"m 511.14,86.91 c 1.18,-1.07 0.14,-2.61 1.23,-2.64 -2.71,-0.48 3.59,-0.81 4.68,-1.52 1.76,-1.47 7.6,-1.81 6.35,-0.5 1.16,1.25 3.78,-0.39 2.09,0.68 3.65,-0.2 8.06,-0.79 11.41,0.55 1.73,2.21 1.59,4.08 -0.5,5.24 2.09,0.18 -0.1,3.42 2.48,3.85 0.22,1.59 -5.48,2.45 -3.53,5.21 -2.76,-1.91 -6.66,-0.13 -8.87,-1.25 -2.82,0.03 -4.86,-2.26 -7.69,-2.79 0.45,1.84 -2.54,0.21 -1.59,-0.6 -1.56,-0.4 -4.91,-0.16 -3.77,-2 -1.91,-1.14 0.23,-3.58 -2.28,-4.23 z",NA:"m 504.49,284.16 c 2.67,-0.59 4.83,-0.78 7.32,0.45 3.68,-0.16 7.38,-0.03 11.06,-0.07 2.77,2.04 6.86,1.64 10.15,1.42 2.28,-0.53 8.45,-1.96 8.32,-0.28 -2.14,0.01 -3.73,3 -5.16,0.58 -2.85,0.57 -7.67,-0.11 -6.03,4.33 -0.21,2.99 1.49,7.83 -2.81,7.26 0.04,5.88 0.15,11.82 -0.09,17.68 -1.9,1.55 -5.65,2.34 -7.23,-0.2 -1.21,-2.07 -2.61,2.63 -4.13,-0.62 -2.03,-2.42 -2.71,-5.57 -2.83,-8.65 -2.09,-3.07 0.35,-7.14 -2.64,-9.75 -2.17,-2.8 -2.68,-6.66 -5.37,-9.07 -0.59,-0.91 -0.65,-2.04 -0.57,-3.09 z",ZA:"m 517.64,316.02 c 1.19,-1.75 2.24,-1.55 2.91,0.34 2.74,0.95 4.87,0.75 6.81,-0.97 0.05,-3.37 0.13,-6.75 0,-10.11 2.76,1 1.69,4.81 2.21,5.84 3.4,1.06 4.82,-2.6 6.62,-4.38 
2.3,1.28 7.34,2.96 7.44,-1.4 3.59,-1.1 4.32,-5.96 8.43,-6.63 1.66,-1.27 5.99,-0.87 7.01,1.02 1.42,3.41 1.71,6.92 1.74,10.57 0.81,1.51 3.16,0.61 1.66,3.16 -0.2,3.58 -4.05,3.76 -5.13,7.09 -2.03,3.26 -5.17,5.67 -8.18,7.99 -3.36,2.5 -6,2.18 -9.22,3.16 -3.5,-1.28 -6.2,0.4 -9.48,0.63 -2.63,1.1 -5.23,1.31 -6.64,-0.86 -1.62,1.82 -0.59,-2.47 -1.89,-2.84 -1.44,-0.92 2.3,-1.41 0.56,-3.41 -2.1,-2.85 -3.25,-6.13 -4.83,-9.22 z",EG:"m 540.62,151.24 c 0.62,-2.12 -0.39,-5.44 2.84,-4.44 3.39,0.06 6.21,1.69 9.5,2.06 2.81,-1.31 3.87,-1.98 4.7,-1.77 0.47,-0.55 4.95,0.31 2.55,0.41 1.25,0.78 3.94,1.03 6.56,0.05 2.29,3.29 1.8,6.73 0.16,10.07 -2.92,-0.61 -3.58,-6.11 -5.17,-5.73 -0.08,2.73 3.43,4.48 3.31,6.42 1.6,2.88 2.84,6.25 4.98,9.06 2.21,1.08 -0.78,0.82 0.53,2.94 -1.41,1.63 -3.38,2.62 -5.01,4.17 -2.06,-1.14 -5.94,-0.88 -6.85,-0.91 -2.87,0.38 -5.9,0.08 -8.84,0.17 -2.85,-0.03 -5.72,0.05 -8.56,-0.01 -0.52,-6.86 0.27,-13.84 -0.17,-20.72 -0.11,-0.61 -0.23,-1.24 -0.51,-1.79 z",TZ:"m 553.31,249.69 c 0.05,-2.04 3.18,-2.61 3.48,-4.46 1.9,-1.22 -2.19,-2.05 0.62,-3.11 0.28,-1.93 -2.42,-4.35 1.3,-3.68 3.3,0.31 7.01,-0.92 9.83,1.32 3.58,1.98 7.47,3.62 9.08,6.89 2.99,1.01 3.44,3.34 1.84,5.82 1.21,1.91 2.3,3.4 1.63,5.05 -0.44,2.33 0.58,3.64 1,5.64 -0.1,1.36 3.86,1.55 0.32,3.04 -2.86,1.07 -5.18,1.74 -7.74,2.08 -1.96,0.13 -4.23,-0.06 -5.95,-0.53 -1.33,-1.85 -0.81,-6.41 -3.9,-5.14 -2.7,-1.2 -5.66,-1.91 -7.67,-3.9 -0.76,-3.22 -4.06,-4.84 -3.5,-8.25 z m 28.59,0 c -0.2,2.46 1.25,0.12 0,0 z",GE:"m 582.88,113.61 c 2.79,-0.74 6.67,0.6 9.61,1.38 1.83,1.51 4.27,-0.27 6.31,1.21 -0.92,1.66 3.09,0.79 1.37,2.38 3.41,2.97 -2.66,-0.44 -3.79,1.12 -2.66,0.97 -4.88,-0.74 -6.53,-0.77 -1.84,0.24 -3.15,0.32 -2.29,-1.68 -0.23,-2.66 -3.01,-2.48 -4.68,-3.63 z",SA:"m 590.61,189.47 c -0.88,-2.99 -3.92,-4.58 -4.89,-7.97 -1.27,-3.07 -6.19,-3.66 -5.52,-7.9 0.39,-2.99 -1.87,-5.27 -4.52,-6.93 -0.65,-2.58 -2.92,-5.29 -4.69,-7.94 -0.06,-2.16 -2.94,-1.82 -2.91,-2.56 0.23,-2.93 1.6,-3.33 4.3,-2.82 1.27,-1.95 4.6,-2.38 4.88,-3.78 
-1.29,-1.21 -3.84,-3.03 -0.42,-3.02 3.89,-1.94 8.22,-0.99 11.6,1.61 3.52,1.8 6.13,5.86 10.43,5.55 2.65,-0.37 4.65,0.71 6.44,1.64 2.33,0.35 1.6,2.87 3.14,3.21 1.82,1.38 2.95,2.21 2.39,3.91 0.18,1.21 2.56,4.27 3.61,4.3 1.04,2.59 4.1,5.36 7.63,4.83 3.93,-0.7 5.02,2.87 3.03,5.92 -0.74,3.57 -5.79,2.83 -8.46,4.41 -3.59,1.37 -8.49,0.07 -11.59,3.09 -1.02,2.11 -2.19,3.42 -4.17,1.94 -2.95,-0.03 -6.49,-0.82 -8.92,0 0.04,0.99 -0.26,2.74 -1.34,2.51 z",VN:"m 755.17,172.63 c 0.91,-1.98 2.34,0.56 3.54,-0.94 1.04,0.45 2.54,0.26 4.02,-0.74 1.02,-2.2 3.13,0.9 5.07,0.38 -0.86,1.7 0.74,3.22 2.41,3.54 2.96,-0.51 -1.88,2.41 -2.21,1.77 0.48,1.03 -2.31,2.48 -2.75,4.57 -1.54,2.03 3.01,3.94 1.85,4.62 2.04,2.46 3.28,2.9 4.87,4.81 0.05,-0.85 1.95,1.9 1.82,2.41 1.55,3.64 0.92,3.94 1.59,7.16 -0.67,-0.85 -0.52,1.4 -0.52,1.15 0.64,1.16 -2.21,3.59 -4.5,4.49 -1.66,0.8 -2.66,-0.69 -2.43,0.39 -0.15,0.91 -1.37,0.04 0.09,0.95 -1.36,0.66 -1.47,-1.28 -0.44,0.82 -2.44,-1.62 -0.12,-0.77 -0.39,0.78 -1.94,-0.57 -1.84,-2.06 -0.82,0.3 -2.07,0.81 -4.16,4.06 -3.82,0.99 0.78,-2.37 0.92,-2.65 -1.03,-3.75 1.68,-0.88 2.76,-2.29 4.83,-1.03 -0.67,-1.4 -1.31,-3.13 0.78,-2.89 1.61,-0.85 4.31,-1.91 3.12,-4.9 -1.35,-2.37 1.11,-4.83 -1,-6.45 1.26,-1.25 -2.9,-1.71 -2.51,-3.92 -2.51,-1.44 -2.46,-4.26 -5.2,-4.92 -3.29,-1.8 0.52,-1.56 1.45,-2.95 -1.87,-0.74 -0.48,-2.8 -3.09,-2.24 -2.24,1.73 -2.61,-2.83 -2.89,-2.56 -0.49,0.58 -1.33,-1.6 -1.86,-1.83 z",RU:"m 971.63,34.32 c -1.5,-0.19 -5.91,2.38 -2.08,1.91 2.33,-1.01 8.44,0.86 8.55,-1.53 -2.14,-0.48 -4.28,-0.89 -6.47,-0.38 z M 699.9,23.18 c -3.3,0.24 -0.12,1.64 0,0 z m -33.68,5.84 c -1.81,2.27 7.41,0.46 3.17,0.09 0.98,-0.56 -2.87,-0.28 -3.17,-0.09 z m -31.55,9.7 c 0.94,-0.02 3.93,1.6 4.89,0.34 -2,-1.28 -6.78,-2.58 -4.89,-0.34 z M 645.6,7.94 c 2.1,0.81 6.73,0.71 7.81,-0.79 -2.55,-0.76 -5.11,0.83 -7.81,0.79 z m -9.12,0.95 c 2.18,0.64 7.53,0.74 7.73,-1.03 -3.66,0.05 -4.99,-0.33 -7.91,0.71 z M 623.91,28.36 c -4.56,1.43 2.66,0.38 3.65,0.95 1.19,0.47 4.53,-0.33 1.95,-1.01 2,0.9 
2.82,-0.47 0.2,-0.48 -1.21,-0.19 5.53,0.74 1.33,-0.65 0.73,-0.06 5.38,0.3 2.36,-1.36 2.9,0.68 2.13,-0.65 4.55,-0.29 -1.54,-0.19 1.53,-0.23 1.67,-0.46 0.13,-0.76 -2.34,0.04 0.06,-0.73 2.54,-0.34 5.76,-1.32 8.84,-1.66 4.99,-0.57 10.06,-0.97 14.86,-2.56 -1.87,-2.8 -8.01,-0.49 -8.89,0.04 -3.71,0.39 -7.48,1.2 -11.28,0.77 -1.77,-0.36 -3.22,1.48 -3.1,0.45 -2.2,0.2 -0.1,0.46 -2.95,0.51 -4.08,0.54 -4.67,1.68 -7.82,1.94 0.15,0.68 -4.98,0.18 -1.05,0.85 1.4,0.2 -3.27,0.59 0.06,0.67 3.97,0.31 -5.23,-0.06 -0.89,0.73 1.24,0.01 -4.22,0.13 -1.04,0.61 -2.54,-0.21 -1.1,0.04 0.47,0.44 -2.25,-0.51 -4.99,0.93 -5.67,1.12 0.61,0.54 6.12,-0.52 2.67,0.12 z m -5.88,2.75 c 1.68,0.11 1.59,-0.22 0.24,0.32 1.51,-0.45 -0.47,0.57 -0.24,0.71 -1.06,0.7 -5.24,0.87 -2.19,2.36 1.86,-0.28 4.85,0.15 4.97,0.36 -0.41,0.75 2.03,0.62 0.25,0.77 -1.12,-0.13 0.72,0.92 -0.99,0.67 2.84,0.54 5.41,0.4 7.21,0.61 1.05,0.34 2.39,0.25 0.79,-0.27 2.41,0.43 2.75,0.37 2.04,0.24 4.33,-0.45 -4.41,-0.98 -4.36,-3.97 0.1,-1.36 -0.47,-1.57 0.41,-1.44 0.29,-0.6 1.54,-0.35 -0.03,-0.74 1.52,0.53 2.74,-0.78 0.38,-0.46 1.41,0.09 2.26,-0.42 0.53,-0.26 4.28,-0.45 -3.39,-1.33 -4.83,-0.64 -2.97,-0.14 -2.08,1.5 -4.89,1.6 z m -7.43,9.53 c -1.67,-2.02 -7.61,1.11 -3.2,1.55 0.98,-0.01 5.53,-0.93 3.2,-1.55 z M 626.15,6.83 c 1.89,0.57 9.44,-0.37 3.92,-0.56 -0.39,0.08 -2.89,-0.15 -1.08,0.44 -0.44,0.01 -3.74,-0.5 -2.84,0.12 z m -9.1,3.04 c 1.78,0.7 6.23,-0.28 2.16,-0.59 -0.28,0.25 -3.39,0.33 -2.16,0.59 z m -13.03,0.4 c 0.78,0.15 5.2,-0.26 2.8,-0.59 2.36,-0.85 6.52,-0.52 7.91,-1.38 -1.22,-0.29 -4.91,0.3 -1.6,-0.28 -0.59,-0.58 -3.5,0.14 -1.34,-0.24 -3.13,-0.25 -2.81,0.57 -3.31,0.85 -1.9,0.22 -7.46,0.41 -2.75,0.61 -2.3,-0.15 0.42,0.53 -2.01,0.44 -7.49,0.01 3.22,-0.16 0.3,0.59 z m -7.39,-1.66 c 3.46,1.24 7.04,-0.96 10.56,-0.08 -1.62,-1.32 -7.35,0.24 -10.56,0.08 z M 550.76,41.81 c 2.52,-0.75 5.08,-1.98 6.88,-2.34 0.83,0.45 2.94,-0.54 3.58,-0.55 1.61,0.32 3.8,1.03 0.66,0.9 -2.9,-0.77 1.31,0.46 -0.7,0.49 1.53,-0.15 3.23,-0.12 3.2,0.35 0.65,0.34 
-2.22,1.33 0.48,0.31 3.07,-0.65 6.96,-0 10.19,1.01 3.84,1.6 6,1.58 9.2,2.84 3.79,0.78 1.59,4.16 -1.47,4.37 -4.11,1.23 -8.3,-0.06 -12.43,-0.41 -2.29,-1 -5.99,-0.44 -7.02,-1.96 -1.92,-0.17 -4.1,-0.36 -1.1,0.3 -1,0.77 3.67,0.87 0.81,1.12 1.5,-0.05 2.08,0.13 0.26,0.22 0.82,0.03 2.97,0.23 0.97,0.57 1.94,-0.29 6.08,1.31 3.11,2.8 2.07,1.43 0.39,2.95 3.74,2.79 1.49,0.82 5.7,2.62 6.11,0.27 -1.26,-0.29 -6.55,-1.72 -2.32,-2.66 1.78,0.52 3.44,1 2.99,1.37 2.04,-0.56 8.95,1.76 4.63,-1.44 -0.68,-2.3 5.02,-2.1 6.68,-3.68 1.48,0.13 4.51,0.29 3.19,1.35 1.11,-1.54 2.52,1.82 1.93,-0.73 2.17,-1.28 0.18,-2.28 -0.97,-2.98 2.76,-1.95 0.07,-3.27 -0.76,-3.71 2.57,0.57 8.38,-0.19 8.42,2.42 -2.12,-0.45 -6.53,1.12 -2.4,2.09 2.21,1.21 2.8,0.62 5.62,0.23 -0.91,-2.75 3.9,-1.49 3.35,-2.25 -0.81,-0.51 3.82,-0.62 4.96,-1.57 2.57,-0.84 3.87,-0.01 5.58,-0.28 -3.18,-1.57 8.33,-1.77 3.14,-1.4 -1.33,0.27 1.2,-0.16 -0.05,0.79 1.06,1.15 -3.55,0.91 -0.29,1.24 2.91,0.48 5.56,-2.03 9.15,-1.01 2.48,-0.4 6.59,-2.15 6,-0.25 0.12,1.82 2.64,0.33 2.43,-0.11 3.13,0.15 2.36,-1.62 0.18,-2.43 3.11,-1.19 6.74,-0.36 10.13,-0.11 1.58,0.26 3.57,0.97 3.25,1.14 2.44,0.14 5.15,0.73 8.09,1.98 1.56,2.29 5.21,-2.21 1.82,-1.33 -1.13,-1.89 -3.26,-1.52 -4.87,-2.19 1.38,-0.81 1.37,-1.23 1.32,-3.03 -4.77,-0.76 4.39,-1.49 4.45,-4.43 1.95,-2.69 4.8,-1.07 8.35,-1.33 4.1,-0.82 2.93,3.44 0.05,3.74 1.68,0.94 3.6,2.18 1.73,3.93 0.86,1.89 -1.11,4.11 2.42,4.15 0.76,1.93 -3.34,4.01 -4.19,4.65 -2.24,1.3 -6.63,0.18 -7.57,0.78 3.24,1.44 7.59,1.87 10.57,0.09 2.11,-0.42 4.07,-2.19 5.25,-3.57 -3.52,-2.7 3.66,-3.35 5.21,-2.4 3.33,0.55 -0.34,4.1 3.31,3.5 2.06,0.07 3.91,-0.08 1.06,-0.3 -2.07,0.51 -2.85,-1.52 -0.6,-1.72 -1.7,-3.02 -6.11,-2.78 -9.2,-2.28 -3.32,1.25 -4.05,-3.19 -1.18,-4.22 -1.37,-1.81 -5.14,-2.02 -1.67,-3.51 2.76,0.11 4.47,-1.6 3.74,-2.88 4.25,0.41 -2.19,5.52 2.88,4.66 2.84,-0.4 5.34,1.47 8.25,0.58 -2.11,0.46 -2.66,-1.43 -5.43,-1.02 -2.46,0.05 -3.97,-2.26 -0.53,-1.84 1.8,1.27 5.53,-0.37 1.63,-0.18 1.45,-1.57 8.02,-0.68 8.84,0.14 1.57,1.18 
8.01,1.5 4.08,-0.7 -2.4,0.18 -4.08,-0.51 -4.45,-2.45 -2.02,-1.1 4.3,-1.77 7.14,-1.4 3.48,-0.3 8.76,0.04 11.02,-1.07 2.34,-0.1 -4.74,-0.7 -2.39,-0.95 -0.42,-0.13 4.58,0.38 1.01,-0.39 0.57,0.25 -3.6,-0.71 -0.97,-0.71 1.82,1.27 2.53,-0.93 3.62,-0.68 -3.88,-0.68 2.51,0.27 3.54,-1.02 2.47,-0.31 7.11,-1.08 10.59,-1.06 1.75,-0.02 6.32,-0.75 2.14,-0.57 -3.2,0.14 2.48,-0.71 1.89,-0.48 1.44,0.03 8.01,-0.59 4.39,0.68 2.73,-0.33 6.46,-0.1 6.71,-0.79 1.53,-0.5 7.67,0.84 3.11,-0.6 0.9,-0.72 6.18,-0.34 8.72,-0.09 -3.83,0.29 -4.57,-2.17 -0.62,-2.7 0.92,-0.73 6.69,-1.44 9.5,-0.53 5.79,0.7 -7.79,1.3 -2.14,1.22 1.05,0.09 4.77,-0.38 1.9,0.35 1.69,-0.44 8.06,-0.46 3.4,1.12 -1.39,0.48 3.57,0.27 4.44,-0.48 3.58,0.25 7.62,-0.79 10.87,0.72 -0.84,0.42 2.48,-0.24 1.6,1.27 1.98,-0.48 0.95,-0.87 2.48,0.42 2.08,1.99 -5.7,-1.07 -2.24,0.93 4.73,-0 -1.11,2.27 -2.81,2.4 -1.94,0.31 -5.86,1.12 -5.85,1.26 -0.9,1.24 -6.22,1.45 -8.63,2.71 -1.8,0.05 -6.59,2.58 -2.26,1.24 2.16,-1.56 5.9,-0.49 8.13,-1.3 2.01,-0.61 3.05,-0.34 6.04,-1.01 -1.37,-0.24 -5.76,0.35 -2.07,-0.9 2.24,0.69 3.44,-0.75 4.09,0.77 2.3,1.09 4.96,-1.83 4.83,0.98 1.66,0.24 1.77,0.4 0.3,-0.3 4.8,-0.78 9.67,-0.25 14.49,-0.31 1.31,0.49 -2.79,0.97 0.54,1.44 4.1,0.37 8.27,0.6 12.35,0.24 1.32,-0.38 -1.11,-2.03 1.18,-1.71 2.19,-1.28 4.98,1.13 7.38,0.3 -0.95,0.87 4.04,-0.89 5.45,0.66 1.57,0.25 2.9,0.72 0.29,0.99 1.1,0.24 3.22,0.23 0.84,0.44 3.89,0.1 -3.53,0.87 0.5,0.78 3.52,0.89 -0.52,1.51 -2.08,0.92 1.24,2.04 2.83,0.75 2.99,0.75 0.84,0.89 -2.7,0.35 0.17,1.39 2.02,1.74 6.76,2.73 7.4,-0.04 1.99,-3.1 6.22,1.88 8.33,-0.22 2.53,-1.29 6.77,0.83 8.07,0.88 -1.07,-0.26 2.51,-0.22 0.21,-0.7 0.86,-1.55 7.15,1.34 4.48,-1.25 -1.52,-0.35 3.99,-1.31 0.04,-0.9 -3.64,-0.53 4.95,-0.88 3.67,-1.44 -1.78,-1.2 3.85,0.5 4.95,-0.03 3.78,0.46 7.81,-0.29 11.41,1.08 -1.97,0.11 -6.62,-0.64 -6.82,0.4 1.84,-0.05 5.37,-0.93 6.18,0.11 -1.39,0.83 -1.65,0.91 -1.09,-0.09 -0.72,0.29 -1.94,-0.53 -1.24,0.75 -5.07,-0.17 0.46,1.63 2.66,-0.53 2.46,-1.06 7.58,-1.06 8.91,0.76 -1.79,-0.38 
-4.53,0.84 -1.05,0.47 1.15,0.28 4.44,0.6 1.29,1.23 2.21,-1.04 6.84,0.24 4.96,0.85 5.17,1 10.36,-1.23 15.56,-0.33 2.68,-0.3 8.1,1.17 6.57,2.79 -1.16,1.6 4.4,1.44 5.94,1.15 4.24,-0.26 8.41,0.19 12.63,0.51 2.15,-0.66 4.01,-1.15 5.28,0.74 2.64,0.49 6.14,2.47 6.98,0.05 -1.24,-1.52 -3.26,-0.69 -1.32,-2.19 0.7,-1.37 8.12,1.13 7.9,0.16 3.46,0.52 7.04,-0.18 10.46,0.85 1.35,0.51 6.14,0.35 4.29,0.92 0.79,-0.46 4.68,1.19 2.41,0.52 2.16,0.46 5.41,1.1 5.43,1.55 1.19,0.56 3.35,0.93 0.79,0.09 2.16,0.97 5.03,1.12 7.95,2.4 1.33,0.64 1.4,1.9 1.88,2.97 0.84,1.19 3.75,0.75 1.22,0 2.47,-0.77 -2.9,-1.74 0.45,-1.46 1.76,0.08 2.44,0.01 2.51,0.67 0.81,-0.43 3.29,0.07 0.65,-0.46 2.22,-0.49 6.18,1.78 7.69,2.37 -0.96,0.09 1.65,0.5 -0.56,0.77 -0.76,2.14 -4.58,-0.74 -1.93,1.06 -2.47,0.6 -2.35,-0.21 -5,-0.36 2.44,0.29 1.25,1.6 0.94,1.38 2.18,0.9 -3.6,1.04 -1.04,1.29 -1.43,-0.11 3,1.25 0.18,0.73 -0.3,0.17 -1.68,1.32 -1.72,-0.36 -1.51,1.26 -4.85,-0.64 -6.58,-1.32 -1.81,-3.33 -6.72,0.72 -8.23,-2.23 -0.61,-0.31 1.99,-2.18 -0.21,-0.88 -1.28,-0.91 -1.46,-0.1 -2.74,0.68 2.99,0.99 -0.19,1.65 -0.28,1.65 3.75,-0.02 -5.6,2.72 -3.73,1.89 -2,0.91 -6.16,-1.99 -6.2,-0.85 3.84,0.06 0.17,0.91 -1.55,0.24 2.36,0.43 -0.59,1.16 1.81,0.69 1.75,-0.28 2.11,-0.33 2.58,1.05 2.16,-0.14 3.34,0.5 3.34,1.94 -1.67,-0.07 1.26,0.52 1.03,0.8 2.43,1.15 0.52,3.62 -2.11,2.22 -1.33,0.29 -4.96,-1.63 -2.89,-0.15 -3.34,0.63 -6.6,1.85 -8.87,2.22 -2.5,0.64 -3.1,1.12 -4.43,1.7 -0.56,0.3 -2.37,1.48 -4.94,1.89 -0.74,1.29 -1.85,2.25 -3.1,0.15 -3.55,-1.16 -6.98,0.24 -9.9,1.78 2.5,-3.25 -2.42,-0.68 -2.59,-0.69 -0.29,1.42 -3.63,-0.72 -3.75,0.28 -2.2,0.8 -0.97,2.76 -2.67,3 -2.8,1.07 -1.97,4.41 0.19,2.71 3.15,0.22 -2.73,3.14 1.25,3.62 1.02,2.49 -3.36,1.43 -0.75,0.35 -1.62,0.1 -5.18,2.81 -2.6,4.89 -1.79,1.07 -7.62,0.75 -5.76,4.09 0.54,0.69 -4.84,0.51 -3.78,1.21 0.26,2.66 -4.19,5.47 -5.34,4.98 -1.23,-1.82 -0.28,-3.97 -1.79,-4.95 -0.41,-3.63 -2.89,-7.71 0.14,-10.94 0.78,0.26 2.73,-2.06 2.57,-2.91 2.48,-0.16 4.7,-0.97 6.7,-2.47 1.76,-2.35 5.38,-3.06 
7.18,-4.99 2.5,-0.93 5.74,-0.98 4.96,-3.13 0.24,-2.16 3.65,-2.8 3.73,-2.53 2.99,0.51 -2.88,-1.13 -4.15,-0.39 -2.79,0.04 -0.26,3.84 -2.75,2.81 -2.29,-0.33 -6.12,3.78 -7.01,2.35 1.19,-1.04 -2.28,-0.34 -0.36,-1.72 2.14,-2.08 -0.23,-0.94 -1.49,-1.11 -2.69,-0.3 -6.59,-0.44 -8.43,1.64 -1.71,1.5 -7.4,3.4 -6.03,4.97 0.96,-1.1 3.85,1.21 0.73,0.68 -3.09,0.1 -5.55,0.89 -8.63,0.83 -3.18,-0.96 4.91,-0.49 0.68,-1.26 -2.13,-0.49 -5.39,-1.56 -7.38,-0.56 1.01,0.46 -1.95,-0.17 -0.43,0.88 -2.55,-0.2 -5.58,-0.7 -7.83,0.16 -2.34,-1.32 -6.17,-0.32 -9.07,-0.26 -3.53,1.16 -5.89,3.73 -9.04,5.38 -3.56,1.69 -5.69,4.21 -9.53,5.74 -2.6,0.19 -3.31,2.3 -0.27,2.04 2.61,-0.94 2.14,1.14 2.26,2.27 2.19,0.07 -0,-1.75 2.56,-1.47 -3.02,1 1.77,0.77 -0.67,1.65 -1.82,2.34 5.63,-2.43 2.06,0.65 2.26,-0.43 0.18,-3.46 3.17,-2.04 1.97,-0.28 4.66,2.3 5.58,3.22 -1.6,0.15 -1.95,-0.45 -0.19,1.01 -0.15,1.5 1.17,2.27 -1.22,3.44 -1.55,2.06 -0.54,4.25 -1.6,6.27 0.41,3.03 -4.64,4.81 -5.79,7.92 -2.09,2.7 -5.25,4.3 -7.35,6.9 -1.64,1.88 -5.23,4.21 -7.92,3.24 -1.8,0.11 -0.61,-1.9 -2.3,-0.7 -0.16,-1.65 -1.87,2.49 -3.28,1.31 0.46,1.81 -1.75,-1.25 1.09,-0.86 1.52,-2.63 -2.24,-6.36 2.34,-6.58 3,2.1 3.84,-1.39 5.39,-2.88 -0.38,-2.5 3.83,-3.99 1.6,-5.81 -3.45,0.24 -6.72,3.17 -10.12,1.43 0.13,-2.81 -1.87,-2.89 -4.03,-4.41 -2.43,-0.09 -5.88,-0.38 -5.77,-3.74 -1.45,-1.8 -2.14,-4.2 -3.6,-5.7 -2.3,-1.41 -5.07,-1.76 -7.89,-2.02 -2.79,0.1 -7.06,0.22 -8.32,2.4 4.53,0.5 -1.32,3.5 -1.93,5.45 -0.81,1.4 -0.35,2.28 -2.87,2.41 -2.78,2.08 -5.92,-1.06 -8.94,-0.42 -3.56,-2.26 -6.54,2.47 -10.36,1.78 -3.78,1.2 -8.2,0.14 -11.23,-1.73 -3.43,-2.39 -7.69,-0.54 -11.49,-0.62 -3.53,0 -2.29,-4.58 -6.08,-3.84 -3.47,-1 -7.32,-2.39 -8.77,1.51 2.67,2.58 -1.84,4.43 -4.22,3.08 -2.65,0.3 -5.06,-0.22 -6.91,-1.86 -3.05,-0.27 -5.04,-1.01 -8.15,0.62 -3.41,0.72 -4.12,2.94 -7.39,2.57 -2.57,3.54 -4.17,-2.26 -5.85,-0.12 -3.45,0.09 -4.91,-3.11 -7.92,-4.23 -1.79,0.67 -5.66,1.46 -6,-0.37 -1.76,-1.57 -3.29,3.16 -4.42,-0.56 -2.08,-2.7 -4.08,-5.81 -7.45,-6.99 -2.94,-0.65 
2.26,-2.26 -1.52,-1.44 -2.68,0.52 -4.55,2.65 -7.12,2.07 -1.71,1.15 -1.51,-1.01 -0.09,-1.09 -0.96,0.21 -3.6,-0.82 -3.19,0.06 -0.64,-1.58 -2.14,0.01 -4.41,-0.88 1.6,-1.26 -0.94,-3.7 -2.77,-2.54 -3.35,-1.83 -4.38,0.94 -7.69,1.08 -3.32,-0.09 -5.6,1.49 -8.71,1.36 -2.79,0.62 -4.93,1.23 -7.82,0.89 -0.95,0.56 -0.85,1.29 0.85,1.36 -3.67,0.63 4.3,1.18 0.3,1.57 -1.27,-0.73 -3.63,0.61 -1.72,1.73 -1.63,0.74 -4.26,1.08 -1.26,1.96 2.69,-0.04 3.89,2.45 0.51,2.77 -2.59,-1.22 -3.45,1.54 -6.07,-0.6 -1.28,-1.9 -3.02,0.94 -4.63,-0.61 -2.08,0.38 -3.98,2.57 -6.19,0.18 -0.36,0.13 -0.41,2.64 -0.76,0.47 -2.31,-1.41 -5.48,-3.19 -7.58,-1.69 -2.42,-1.77 -3.19,0.57 -6.05,1 -0.91,1.62 -3.37,0.79 -1.95,3.21 -1.78,1.59 -3.66,-3.44 -4.48,-0.03 -2.34,1.01 0.7,2.74 -1.77,3.83 1.72,0.88 1.16,2.91 3.55,2.32 1.92,0.42 4,3.42 2.02,3.3 3.17,0.87 0.68,1.16 0.15,1.87 0.14,0.2 -3.39,1.02 -3.1,0.78 0.05,0.65 -2.16,2.52 -2.08,3.59 2.36,0.01 1.19,3.75 2.55,1.49 -3.08,2.65 4.55,4.97 1.08,6.89 -2.15,1.53 -4.26,-2.15 -6.69,-2.29 -0.24,-2.22 -3.9,-0.8 -5.31,-1.29 -3.01,-1.83 -6.89,-1.88 -10.23,-2.23 -1.78,-0.19 -4.76,-3.39 -6.74,-3.27 -1.46,-0.98 -3.03,-1.13 -2.39,-2.05 -0.52,0.01 3.77,0.8 2.26,-0.72 1.23,-0.82 1.38,-1.35 2.52,-1.35 -1.99,-0.82 -2.6,-1.9 -0.03,-1.48 0.08,-0.87 4.3,-1.72 0.79,-1.49 -1.7,0.19 0.99,-0.52 -1.29,0.13 -1.34,-2.6 5.48,-1.24 4.27,-3.36 0.06,-0.72 -0.91,-1.17 0.33,-1.54 -1.63,0.07 2.15,-2.61 -0.74,-2.02 -2.43,-1.28 -4.96,-0.76 -6.99,-2.3 -2.48,1.01 -5.03,-0.02 -5.75,-2.1 -2.48,0.45 -2.83,-1.27 -2.31,-2.1 -1.74,-3.01 -5.47,0.3 -7.34,-1.15 -0.7,-1.78 -1.35,-3.13 1.38,-2.27 3.16,-0.33 0.44,-2.32 -0.94,-2.06 -0.65,-1.4 -3.51,-2.48 -2.43,-4.79 -2.86,-1.11 -4.55,-0.8 -7.42,-1.56 -0.34,-1.92 -1.75,-2 -1.49,-3.59 -2.06,-0.78 1.47,-1.38 -0.64,-2.52 -0.39,-2.21 2.22,-2.82 1.75,-4.13 1.41,-0.3 5.09,-0.54 5.47,-0.65 -2.54,-0.54 -4.68,-1.21 -4.25,-2.06 -4.58,1.28 2.09,-1.55 3.43,-2.54 2,-1.25 7.48,-3.89 2.47,-5.11 -3.07,-0.74 -0.25,-1.08 -0.51,-2.36 -2.14,-0.64 -0.29,-1.41 -2.37,-1.96 0.74,-0.38 
-0.53,-2.35 1.36,-2.08 -1.86,-2.36 -3.8,-3.95 -0.44,-5.63 -1.21,-1.25 -5.27,-1.27 -3.5,-3.22 z m -24,40.71 c 0.88,-1.73 2.7,-1.43 4.13,-2.53 -2.85,1.07 0.58,1.91 0.81,0.07 2.08,0.22 6.34,0.97 4.11,2.79 -2.19,-0.03 -8.09,0.44 -7.89,-0.69 2.88,-0.71 -0.85,-0.45 -1.16,0.35 z m 223,-67.55 c -4.59,2.22 3.76,0.36 5.68,0.55 2.84,-0.28 8.08,0.35 9.36,-1.46 -2.8,-0.94 -5.59,-1.47 -8.48,-0.35 2.46,-1.14 1.51,-1.97 -0.83,-1.28 0.14,-0.04 -4.27,0.2 -2.19,0.83 -2.54,-0.13 0.16,1.15 -2.09,0.62 -0.66,0.05 -1.03,0.67 -1.45,1.09 z m -15.19,-4.39 c -2.63,0.17 1.01,0.57 -1.42,0.71 -2.13,0.29 -4.71,0.8 -1.08,0.67 1.38,0.3 5.95,1.77 9.06,1.42 2.18,0.49 5.08,0.87 8.3,-0.03 -1.67,-0.82 -3.11,-0.54 -0.78,-1.18 2.16,-2.45 -3.42,-1.89 -4.48,-1.31 -2.13,0.5 -2.34,0.64 -0.16,-0.48 -3.02,-0.52 -6.41,-0.32 -9.44,0.2 z m -8.47,-0.91 c 1.77,-0.54 6.96,0.07 2.33,-0.04 -2.54,0.03 3.07,0.57 -0.35,0.37 0.98,0.78 6.2,0.36 8.94,-0.08 2.09,-0.12 5.73,0.71 4.54,-1.43 4.76,-0.14 -1.67,-0.62 -3.05,-1.86 -1.06,-0.3 -4,0.25 -1.31,0.2 -2.23,0.29 -7.04,0.42 -7.73,1.41 2.63,0.43 -5.01,1.09 -0.54,1.12 -0.45,-0.23 -3.92,0.04 -2.82,0.32 z m -1.28,0.91 c 0.46,0.98 4.67,0.37 2.23,0.78 2.23,0.88 6.89,-1.15 2.05,-0.83 -0.89,0.18 -5.17,-0.51 -4.28,0.05 z m 160.93,14.73 c 1.89,0.35 7.14,-1.17 2.58,-1.29 -3.24,-0.5 -6.78,-0.05 -9.54,-1.02 -3.43,1.66 3.77,2.03 5.24,2.25 l 0.85,0.04 z m -25.17,3.62 c -2.36,0.58 3.2,-0.42 4.04,0.33 2,0.5 8.21,0.71 3.78,-1.2 -2.87,-1.22 -5.22,0.19 -7.82,0.87 z m -8.42,-6.04 c 0.81,-0.01 1.46,1.28 -0.37,0.79 2.5,2.49 6.34,1.41 8.91,1.5 1.91,-0.24 6.38,-0.99 5.66,-0.08 1.51,-0.16 6.2,-0.24 2.21,-0.64 -3.44,-0.75 -2.13,-2.75 0.12,-1.6 -2.84,1.84 5.45,2.38 5.47,0.44 3.07,-0.14 -3.3,-1.64 -4.56,-1.03 -2.46,0.55 -4.5,-1.5 -5.62,-0.66 -0.9,0.21 -1.25,2.33 -2.11,0.68 -2.65,-0.48 -5.34,-1.7 -8,-0.36 0.83,1.16 -1.43,-0.07 -1.71,0.96 z m 80.88,57.08 c -3.12,-0.06 3.2,3.15 -0.02,0.55 -0.07,-0.17 0.2,-0.4 0.02,-0.55 z m -7.79,-9.11 c 1.7,0.2 5.64,-2.63 1.53,-1.36 -0.47,-0.2 -0.5,1.33 -1.53,1.36 z m 
-40.64,36.73 c -2.29,0.38 -3.51,1.08 -5.23,2.73 0.13,-0.02 4.7,-2.12 5.23,-2.73 z m -7.3,2.84 c -1.21,1.14 -2.89,3.28 -0.55,1.04 0.9,-0.66 2.7,-0.78 0.55,-1.04 z m -9.66,-26.71 c 0.75,1.38 -0.43,1.65 -1.91,1.68 -0.17,2.36 -1.31,4.65 0.77,6.5 -1.23,3.17 -0,6.76 -0.66,9.65 0.05,1.47 -1.02,7.12 1.07,3.3 1.11,-1.76 3.47,1.34 3.17,0.46 -0.05,-2.77 -1.34,0.29 -1.58,-2.82 -2.96,-1.39 1.1,-6.32 1.28,-5.58 -0.61,-1.26 4.06,1.68 3.46,1.59 -1.93,-2.09 -2.68,-5.85 -3.35,-7.75 -0.75,0.02 -1.18,-3.19 -0.15,-2.9 -0.66,-1.12 -0.96,-3.6 -0.05,-0.95 -0.34,-2.36 -1.49,-2.05 -1.26,-3.92 -0.65,-0.64 -2.51,-0.14 -0.77,0.74 z m -14.76,-2.79 c -0.45,1.81 3.64,-0.2 1.02,-0.48 -0.44,-0.39 -0.62,0.49 -1.02,0.48 z",HT:"m 266.35,183.21 c 0.99,0.03 4.39,1.37 4.7,-0.3 -1.42,-0.77 -0.59,-2.99 -2.83,-2.75 2.53,-1.63 5.89,0.33 4.66,2.79 -1.39,0.24 1.33,2.8 -1.22,1.39 -1.06,-0.01 -8.54,0.81 -5.31,-1.14 z",BA:"m 515.61,108.85 c 1.82,-0.49 4.8,-0.16 7.39,-0.06 1.25,0.88 3.38,0.03 2.04,2.13 1.88,0.97 -0.1,0.88 0.72,2.1 -1.5,0.44 -2.48,1.02 -2.64,2.86 -2.69,-0.82 -4.05,-3.01 -6.27,-4.99 0.04,-1.12 -1.73,-0.78 -1.25,-2.05 z",IN:"m 661.02,168.77 c 2.15,-2.64 10.72,0.22 6.65,-5.13 -1.73,-1.18 -1.11,-2.71 -2.75,-3.65 0.53,-4.88 5.65,-1.01 7.64,-4.52 1.73,-3 5.85,-5.29 5.99,-8.59 1.43,-1.5 3.18,-2.4 0.21,-3.07 -1.63,-1.59 -1.49,-3.83 -1.68,-5.26 3.14,-1.38 6.62,1.29 9.3,-2.1 2.3,-1.39 2.86,2.18 4.57,3.04 -1.41,1.43 3.61,5 -0.35,4.78 -2.13,-0.68 -0.03,3.77 1.34,3.53 2.41,1.42 5.57,2.61 2.52,5.25 0.4,3.43 5.65,4.33 8.7,5.38 3.86,0 6.89,3.07 10.87,2.88 4.34,1.45 0.38,-6.23 4.38,-4 -0.49,4.9 5.68,2.25 8.5,2.78 1.05,-1.6 -2.06,-2.93 1.09,-2.86 3.05,-2.18 5.91,-4.26 9.57,-3.92 1.62,-0.78 1.39,0.8 2.11,1.23 -1.51,2.39 4,0.44 1.53,3.35 0.15,2.57 -3.8,0 -5.02,2.87 -1.52,2.68 -1.82,7.66 -5.35,7.78 0.34,1.88 -1.46,7.6 -2.6,3.28 0.56,-3.52 -1.66,-2.19 -2.84,-1.16 -2.77,-2.69 5.71,-4.36 1.08,-5.77 -2.64,-0.13 -5.78,0.48 -5.97,-2.74 -1.64,-0.43 -5.01,-2.22 -3.37,1.42 3.43,0.75 -3.64,3.11 0.58,3.64 -0.54,2.11 2.77,6.55 
0.32,7.27 0.14,-2.94 -1.22,2.41 -1.79,-0.81 0.35,-1.41 -1.28,1.63 -2.67,1.7 -0.84,2.4 -2.43,5.18 -5.24,4.8 -1.25,2.86 -5.37,5.36 -8.05,7.72 -1.36,2.67 -8.24,3 -6.05,7.72 0.67,2.63 -0.29,5.48 -1.25,8.74 1.46,2.18 -3.6,4.23 -1.64,5.29 -3.2,-0.57 -3.39,5.55 -6.34,1.92 -1.91,-2.81 -1.52,-5.2 -3.5,-8.28 -2.54,-3.3 -2.51,-7.76 -4.9,-11.15 -1.54,-3.02 -2.32,-6.83 -2.3,-10.07 -0.66,-0.95 -0.81,-5.32 -0.56,-7.14 1.96,-0.46 -2.38,-1.05 0.2,-1.88 -3.14,-0.91 -1.37,5.66 -5.59,4.12 -2.56,-0.4 -7.37,-5.37 -2.47,-4.86 3.91,-3.18 -3.31,1.14 -3.81,-2.38 -0.35,-1.97 0.32,-0.7 -1.1,-0.85 l 0.06,-0.34 z m 68.31,30.59 c -0.76,2.09 -0.39,5.08 0.27,1.28 0.49,-1.51 0.24,-4.58 -0.27,-1.28 z",CN:"m 676.19,124.66 c 0.98,-1.83 3.25,-2.89 5.46,-3.21 1.72,2.58 3.49,-2.51 6.01,-1.25 1.96,-1.24 5.07,-2.13 6.86,-3.29 -0.65,-2.73 3.04,-2.13 0.47,-5.26 0.7,-1.82 -0.76,-1.93 -1.33,-2.46 2.18,-1.71 6.37,0.32 7.36,-1.37 -1.71,-0.44 0.85,-4.93 2.17,-4.89 2.66,1.09 7.77,1.38 6.25,-2.85 1.12,-1.02 3.17,-0.74 3.53,-2.58 3.42,-0.89 2.49,1.94 5.53,2.87 2.83,-0.07 6.03,2.03 5.68,5 -0.4,1.63 -1.19,3.52 1.67,3.39 3.81,-0.25 7.11,1.27 10.59,2.43 0.94,2.78 3.39,5.13 6.71,4.15 3.26,0.94 6.58,-0.02 9.86,0.56 3.02,1.55 6.81,1.97 9.99,2.52 3.46,-1.14 7.28,-2.67 11.14,-2.14 3.36,0.17 5.74,-2.56 8.43,-3.8 -3.55,-2.2 0.96,-5.11 3.39,-2.82 2.7,-0.29 4.66,-2.42 7.58,-2.27 1.89,-2.66 5.14,-2.73 8.12,-3.34 3.52,1.8 3.22,-1.63 0.5,-2.54 -2.9,-2.4 -7.09,1.31 -9.71,-1.03 1.84,-1.66 2.23,-6.59 5.67,-4.32 1.73,-0.47 5.28,-0.91 4.66,-2.74 0.65,-2.25 5.72,-3.82 3.46,-5.87 -3.15,-0.11 2.81,-2.56 4.29,-2.25 3.47,-0.72 6.65,0.61 9.85,1.03 1.47,1.29 2.72,2.4 3.14,4.21 1.6,1.71 1.34,4.47 3.28,5.65 2.63,-0.12 6.8,1 7.49,2.86 -0.06,3.38 4.81,2.8 7.08,1.24 2.05,-0.65 5.63,-1.43 3.84,1.81 -2.18,1.1 -1.62,4.59 -3.99,5.76 -0.74,2.42 -3.94,-0.86 -5.37,1.36 -0.68,1.23 1.33,6.54 -1.79,6.4 0.65,1.89 -2.28,-3.04 -2.11,0.29 -1.2,1.07 -3.81,1.45 -4.57,1.93 0.9,3.73 -4.61,-1.94 -5.15,1.91 -2.77,1.4 -5.47,3.91 -8.38,3.98 -1.83,0.26 -4.94,3.72 
-5.71,2.3 1.87,-0.72 2.25,-1.37 -0.02,-1.55 1.16,-1.01 4.13,-2.63 1.82,-4.16 -2.71,0.14 -4.38,2.63 -7.17,3.79 -0.26,2.82 -6.3,-0.06 -4.28,3.62 1.87,1.47 4.4,-0.22 3.61,3.08 2.93,1.27 3.81,-3.22 6.7,-0.84 1.85,0.07 4.74,0.71 2.12,1.94 -0.85,-0.61 -2.49,0.66 -4.04,0.94 1.1,0.5 -1.5,1.4 -1.84,1.14 0.85,1.18 -5.19,3.68 -0.97,4.61 3.04,0.63 2.02,5.26 4.61,6.08 0.38,0.89 2.57,2.29 -0.03,1.31 -1.71,-0.33 -3.98,-0.32 -4.77,-1.14 0.2,0.17 1.18,1.11 3.05,1.01 1.24,0.82 5.05,3.07 1.14,3.56 -0.82,1.12 -4.41,1 -1.66,1.07 0.15,1.25 2.24,-0.9 3.32,0.96 1.68,7e-4 -2.37,2.24 -0.01,0.92 1.46,1.66 -2.03,0.31 -0.33,1.92 0.25,0.71 -2.53,0.11 -0.44,0.9 0.21,2.07 -1.3,-0.07 -2.09,1.69 -0.34,-1.27 0.17,3.3 -1.32,1.9 0.31,1.21 -1.28,2.5 -1.15,1.82 -1.91,-0.55 1.16,1.12 -1.18,1.94 -2.52,-0.3 1.93,-0.49 0.12,1.08 0.75,1.16 -1.52,-0.46 -0.8,1.16 -1.3,-0.69 -1,1.85 -2.11,1.43 -1.44,0.56 -1.84,0.37 -1.2,1.22 -0.58,1.11 -3.91,1.83 -3.53,2.7 -0.54,1.17 -3.16,1.16 -3.37,1.31 -0.21,0.3 -1.88,0.26 -2.25,0.37 -0.4,-0.54 -0.32,1.21 -1.98,0.36 -1.09,0.06 -1.43,-1.98 -0.79,-1.66 -2.48,0.28 0.3,1.37 -0.8,2.65 -1.59,-2.37 0.45,1.1 -1.14,0.16 -0.53,2.04 -2.1,-0.49 -2.11,1.18 -1.28,-0.82 -3.27,1.61 -4.19,0.79 -0.95,0.09 -1.82,1.58 -1.18,1.78 1.08,2.17 -1.87,1.65 -1.57,0.11 -0.21,-1.58 0.77,-2.41 -0.73,-2.43 -0.66,1.54 -1.9,-0.84 -2.21,-0.27 -0.84,-1.36 -0.36,1.63 -2.6,0.24 -1.39,0.29 -4.02,-1.33 -2.89,-2.75 -0.33,-1.32 -3.39,-0.8 -4.13,-1.88 -1.6,1.63 -3.37,1.75 -4.75,2.01 -0.87,-0.55 -1.87,0.5 -2.97,-0.31 -0.93,1.27 -3.59,0.13 -1.88,2.76 1.13,2.12 -2.19,1.84 -1.82,-0.17 -3.05,2.72 -2.46,-1.51 -5.31,-0.98 0.41,-1.66 1.61,-2.8 -0.66,-3.07 -0.66,-2.14 -0.16,-3.51 -3.08,-2.08 -1.52,-0.11 -0.44,-4.15 1.31,-4.64 1.63,-0.67 1.84,-5.2 0.32,-5.63 -0.62,-0.82 -1.93,-3.56 -3.2,-1.93 -1.58,-0.67 -3.46,0.33 -2.03,-1.74 -0.71,-0.71 -1.24,0.1 -0.84,-1.18 -1.66,-0.28 -3.02,1.33 -4.67,0.18 -3,0.65 -5.1,5 -7.99,3.9 -2.19,-0.27 -4.92,-2.17 -6.72,0.26 -1.39,2.29 -0.88,0.42 -1.34,-0.74 -2.24,1.12 -6.05,-0.24 -7.37,0.15 
-1.72,-1 -3.07,-1.32 -5.2,-2.73 -2.13,-0.8 -4.8,-3.35 -7.47,-3.95 -1.43,1.75 -3.37,-1.06 -4.89,-1.65 -1.25,-0.09 -1.77,-1.57 -2.84,-1.23 0.24,-1.36 -2.08,-4.64 0.61,-2.74 3.94,-1.31 -2.27,-3.7 -0.23,-5.46 -2.53,-0.9 -2.49,-4.23 -5.76,-3.62 -1.19,-0.91 -2.65,-0.68 -2.66,-2.99 -1.4,-0.97 -5.85,-1.1 -2.11,-2.04 -0.68,-1.91 -0.62,-4.36 -3.56,-3.51 0.02,-1.05 -0.58,-1.09 -0.48,-2.31 z m 97.06,56.63 c -1.11,3.9 4.72,3.69 5.27,0.66 1.63,-1.13 0.73,-3.74 -0.44,-2.35 -2.21,-0.55 -3.09,0.13 -4.83,1.69 z m 14.89,-8.96 c 1.82,-0.56 0.14,1.44 0,0 z", CA:"m 244.83,107.58 c 2.56,-1.73 -4.42,-1.05 -3.23,-0.93 1.16,0.78 3.66,0.35 3.23,0.93 z M 104.71,86.08 c 3.08,-0.45 -0.47,0.88 2.33,2.09 2.62,1.96 -2.81,-1.34 -2.42,-1.74 -1.02,0.29 -0.33,-0.53 0.09,-0.35 z m -1.61,-1.25 c -1.31,-3.08 3.61,-0.28 1.49,-0.2 -0.93,0.47 3.48,-2.79 1.31,-0.11 0.03,2.3 -3.46,1.11 -1.72,0.94 0.13,-0.07 -2.15,-0.68 -1.07,-0.63 z m 7.14,-1.3 c -1.97,0.46 -0.13,0.25 -0.04,0.57 0.33,0.02 0.37,-0.51 0.04,-0.57 z m 0.86,1.37 c 2.1,1.83 1.37,-0.53 -0.62,-0.93 l 0.14,0.3 z M 309.76,101.63 c -3.74,1.04 -2.32,-2.06 0.32,-2.47 -0.97,0.04 -3.66,0.15 -1.35,-0.7 -0.84,1.41 2.48,-1.8 2.19,-0.65 0.19,-0.53 -0.89,-1.18 0.45,-1.6 0.95,-2.81 2.42,-5.02 5.54,-5.58 2.59,-0.18 -1.89,0.8 0.31,1.17 -0.88,0.61 -4.78,5.96 -1.61,3.18 0.66,-0.31 3.99,0.11 1,0.89 -1.13,0.11 1.44,-0.04 -0.42,0.73 1.61,0.2 3.54,-0.84 2.04,1.08 1.8,-1.07 2.61,-1.46 5.38,-0.6 -0.87,1.02 -2.5,1.19 -1.2,1.4 0.05,0.34 0.89,0.09 -0.7,1.12 0.85,-0.39 5.1,-1.26 1.4,0.5 -1.56,0.18 0.51,0.26 -0.6,1.31 0.53,1.55 3.59,-3.11 1.88,-0.13 -0.79,1.96 2.57,-1.37 1.04,1.65 -1.29,3.05 -2.65,0.4 -2.49,0.31 -2.83,2.91 0.95,-4.13 -1.92,-1.48 -0.31,0.75 -1.34,0.25 -2.54,1.84 -5.04,0.91 3.13,-2.34 0.7,-1.86 0.01,-0.95 -1.74,0.99 -2.54,0.22 -0.35,0.19 1.48,-1.19 -0.49,-0.8 -1.06,1.54 -4.96,-0.09 -6.38,0.48 z m -13.97,-6.19 c 1.75,0.03 7.41,2.68 2.62,2 -1.7,-0.27 -9.04,-3.24 -2.62,-2 z m 4.16,7.1 c 0.66,-1.17 2.5,-1.07 0.33,-0.31 1.28,-1.24 -0.82,1.46 -0.33,0.31 z m 1.76,4.82 c 
-1.8,-2.58 4.69,-5.31 2.21,-1.89 0.47,0.45 -2.17,0.92 -0.8,0.9 -2.04,1.42 2.53,0.05 0.14,0.01 2.15,-0.94 -0.29,-0.55 1.6,-0.81 2.95,0.15 -1.22,2.57 -3.16,1.8 z m -6.54,-1.87 c -0.58,-0.96 -3.25,-1.11 -0.83,-2.23 -0.95,2.98 4.97,1.1 5.26,1.88 -1.31,0.29 -1.93,2.05 -2.6,0.23 -0.48,0.53 -1.25,0.39 -1.82,0.12 z M 146.14,38.73 c 2.04,2.16 5.73,2.16 9.08,2.11 1.9,0.06 2.05,3.48 5,1.99 3.34,-0.06 4.47,-0.1 8.62,-0.48 2.71,-0.39 5.75,-1.68 7.96,-1.82 0.12,1.1 4.45,0.21 3.52,1.24 2.83,-0.26 7.06,1.05 9.03,-0.86 -0.38,-0.8 -2.91,-0.99 -3.76,-0.03 1.4,-0.93 -2.45,-1.6 0.72,-0.97 2.01,-1.22 4.15,-0.13 5.74,-0.72 -1.83,-1.48 -4.91,-1.85 -7.57,-2.29 -1.3,-0.45 -3.98,-1.07 -2.25,-2.41 -2.43,-2.5 -4.5,-5.8 -8.66,-4.99 -3.42,-1.76 -2.11,2.72 0.38,3.84 -3.4,2.2 -3.08,-2.67 -6.23,-2.63 -1.42,-0.87 -5.32,-0.51 -1.4,0.18 0.95,0.32 -1.7,-0.12 0.11,0.65 -0.55,-0.65 -6.22,1.4 -4.5,0.11 2.51,-1.3 -6.37,-2.15 -5.77,-0.39 -0.62,-0 -4.26,0.38 -0.94,-0.68 -0.75,-3 -6.07,-0.2 -8.52,-0.27 -1.42,0.62 -5.7,1.08 -3.04,2.03 -1.61,0.03 -4.53,1.63 -1.3,1.71 3.11,-0.04 1.83,-0.01 1.51,0.64 2.2,0.34 7.16,-0.7 7.75,-0.15 -0.67,0.01 -3.39,0.09 -1.34,0.17 -1.74,0.94 -6.48,-0.16 -6.63,1.54 3.31,1.26 7.06,0.66 10.55,0.55 2.33,0.08 6.76,0.17 7.64,1.17 -4.22,0.32 -8.48,-0.56 -12.71,0.23 -1,0.12 -2.1,-0 -3.01,0.54 z m -51.48,2.87 c 1.51,-0.44 0.54,0.2 0,-0.73 2.66,0.85 -1.13,-0.61 1.69,-0.17 0.89,-0.89 6.58,-1.37 2.64,0.17 -1.82,0.74 1.69,2.23 -0.86,0.9 -1.04,-0.24 -2.37,0.11 -3.48,-0.17 z m -11.14,24.71 c -4.74,0.6 -2.53,-5.35 -2.97,-8.32 0.51,-5.66 -0.5,-11.32 0.04,-16.99 0.37,-3.03 6.07,-0.4 8.15,0.13 1.99,0.48 7.51,1.29 7.51,0.89 -1.1,-1.47 5.34,1.78 2.19,-0.64 2.81,-0.87 5.93,-2.03 6.76,-1.83 2.59,-0.42 4.86,-1.4 6.95,-1.35 1.76,1.12 -5.93,1.39 -3.63,2.45 0.33,-0.17 4.33,-2 5.58,-1.66 -1.4,1.69 4.21,-1.53 3.24,-1.01 -3.01,-1.53 3.09,-0.15 3.51,1.65 0.73,1.14 5.49,0.83 2.6,0.69 1.65,-0.06 -0.15,-1.24 2.02,-0.79 -3.09,-0.18 2.76,-1.13 -0.53,-0.74 -1.08,0.04 2.55,-0.88 1.76,0.74 2.39,-0.25 -1.7,1.07 0.84,1.05 
3.31,-1.3 7.01,-1.88 10.62,-0.29 3.1,0.19 9.75,2.4 10.88,1.31 2.3,-0.71 9.6,1.99 3.82,2.28 -3.69,1.21 3.34,1.18 5,1.34 3.05,0.26 6.43,-0.99 8.67,-0.54 2.92,1.04 3.42,0.68 5.84,2.08 -1.21,0.61 -2.78,-0.51 -0.62,0.69 0.02,0.11 4.35,2.54 1.08,0.32 -0.2,-1.47 2.67,0.58 0.4,-1.56 -1.79,-2.45 -0.5,-1.93 2.44,-2.39 1.25,-0.6 4.56,-1.41 1.27,-1.14 -1.22,1.15 -5.07,0.43 -4.44,1.07 -2.82,0.55 -2.81,-1.33 0.45,-1.36 2.84,-1.2 5.87,-0.6 7.18,1.02 2.5,0.5 6.03,1.19 9.28,1.53 3.16,-0.39 6.32,0.22 9.42,-0.09 -3.13,-1.65 6.02,1.92 2.48,-0.46 -1.47,0.42 -3.22,-0.33 -2.65,-0.73 -1.46,-0.58 1.58,-0.36 2.14,-0.86 1.7,0.7 3.29,0.69 3.14,1.29 3.15,-2.04 -0.1,1.34 0.82,1.78 0.03,0.71 2.07,0.16 1.6,0.63 2.8,1.45 -4.59,-0.3 -0.84,1.17 1.91,1.41 -1.85,-1.34 1.34,-0.34 2.24,-1.01 -2.52,-3.19 1.31,-3.24 2.32,-0.41 5.92,-1.92 2.95,-2.58 1.52,0.95 -3.62,0.77 -0.77,-0.19 0.69,-0.24 -0.97,-1.12 1.23,-1 1.07,-0.19 -1.05,1.24 1.2,0.11 -1.98,-0.81 -6.79,-0.11 -8.7,-2.42 0.49,-1.44 2.05,-0.47 1.56,-1.36 -3.41,0.98 -1.2,-3.45 0.98,-1.7 0.81,-1.07 -2.56,-0.64 0.04,-1.02 1.52,-0.69 3.3,-0.36 0.61,-0.67 2.31,-0.94 1.96,1.17 4.35,0.52 1.51,0.23 2.43,2.42 2.9,2.66 1.31,1.26 5.29,1.89 1.1,1.83 -0.59,0.47 2.37,0.22 -0.18,0.8 -2.2,0.78 0.07,0.46 1.98,0.93 0.32,-0.62 2.69,-0.19 0.6,0.02 2.11,-0.14 4.46,0.1 1.28,0.73 -1.45,-0.18 2.86,0.97 1.26,2.35 2.09,1.79 1.56,-2.7 4.37,-2.24 1.94,-0.05 4.58,3.11 1.77,2.69 0.16,2.3 2.94,3.74 4.7,2.5 1.67,-1.64 2.36,-4.21 4.92,-4.09 -2.14,-0.41 2.15,-0.47 -0.59,-0.85 -1.27,-0.32 -2,-2.61 1.21,-1.98 1.52,0.28 6.69,0.22 5.83,0.88 -3.77,0.13 1.61,-0.21 0.99,0.83 2.21,-0.61 3.43,0.98 0.4,0.98 3.59,-0.39 1.24,1.58 -0.92,1.12 -1.08,0.83 2.57,1.41 3,3.55 -2.46,1.68 -6.78,3.08 -7.84,1.39 -1.45,-0.72 -0.92,-1.23 -3.01,-0.4 2.13,-0.37 2.4,0.17 3.84,1.87 -1.74,-0.44 -1.92,-0.44 -2.14,-0.01 -1.88,-0.53 -4.67,-1.55 -6.03,-0.58 5.01,0.23 -1.93,4.21 -4.04,2.76 -2.46,-0.63 -0.25,-0.53 -0.98,-0.62 -1.3,-0.23 -4.45,-1.31 -3.5,-0.51 -0.92,-0.36 -5.89,-0.27 -2.81,-0.25 3.62,0.89 7.31,1.87 
10.82,2.38 -1.03,1.96 -5.91,3.47 -5.86,3.16 -2.55,-0.41 -1.86,-1.03 -3.11,0.91 -2.26,0.36 -8.59,-1.05 -9.49,-1.16 -1.29,0.69 5.54,0.37 3.02,1.39 2.48,-1.49 7.83,1.65 2.78,2.06 -2,-0.32 -2.91,0.06 -1.22,0.73 -2.3,-0.36 -2.13,1.41 -2.74,0.73 -0.46,1.4 -2,0.53 -1.39,1.69 -2.08,0.11 -1.69,3.18 -3.61,3.39 1.24,2.05 -1.5,4.67 0.69,4.72 1.72,0.47 -0.41,2.56 0.92,0.17 3.52,-1.7 5.11,4.92 4.09,5.11 2.05,-0.87 -0.14,0.49 2.28,-0.39 3.91,-0.93 8.29,0.43 11.65,2.49 1.68,0.86 7.67,1.82 6.77,3.05 3.47,-1.04 5.34,-0.15 8.43,0.06 -0.75,2.1 0.08,4.64 0.26,6.34 0.95,0.51 2.19,1.47 0.57,1.79 1.75,-0.47 5.9,2.21 2.48,3.28 1.45,-1.58 4.45,0.08 4.46,0.44 -2.57,-2.9 1.98,-1.26 1.46,-1.41 -0.03,-0.6 0.01,-1.95 0.9,-2.64 -0.74,-1.25 -1.02,-2.3 -1.44,-3.61 0.46,-0.89 -0.11,-1.27 -1.11,-2.3 -0.61,-1.94 5.15,-1.38 6.52,-3.51 4.22,-2.81 -0.03,-7.89 -3.79,-8.31 0.71,-1.66 2.34,-2.57 2.83,-3.18 2.16,-0.49 -1.96,-1.63 0.02,-2.24 -1.18,-0.38 -0.34,-0.96 -1.74,-0.73 1.34,-1.63 1.99,-2.18 -0.12,-3.43 1.39,-3.46 6.51,0.15 7.76,-0.76 1.59,0.83 7.15,-1.66 7.06,1.25 2.62,-0.24 1.07,0.89 3.46,0.75 -2.6,1.14 3.21,1.14 4.45,2.06 2.49,-1.25 0.42,0.81 0.74,2.26 -1.55,0.38 -5.34,-0.21 -1.48,0.2 2.84,-0.45 0.35,2.25 2.73,2.1 -0.79,0.38 -1.21,1 -1.63,0.92 -3.1,1.8 5.98,-1.97 3.87,2.16 -2.05,0.74 -3.51,1.21 -0.56,0.39 0.74,-0.84 2.92,-2.33 1.42,-0.37 1.54,-2.1 0.73,1.37 1.52,-0.15 2.02,-1.1 3.96,-2.16 4.54,-0.78 -0.45,-1.4 -0.11,-1.39 1.63,-1.63 -1.87,-0.2 -0.13,-1.63 0.85,-0.95 -3,-1.08 0.71,-0.99 -0.29,-1.45 0.91,-2.07 2.88,-0.97 0.98,-0.29 1.57,-0.58 2.12,1.13 2.7,1.58 -0.6,0.25 2.61,0.74 -0.48,1.01 1.67,-0.01 2.25,0.09 2.22,0.58 2.6,0.65 -3.13,1.67 0.2,1.16 1.49,-0.33 1.62,0.85 -0.52,1.25 1.9,-0.82 2.82,-0.05 2.23,0.08 0.83,-0.29 2.46,1.54 0.07,1.35 2.52,0.36 3.26,1.57 1.98,2.24 -0.58,0.44 -2.7,-0.45 -0.87,-0.35 -3.2,-0.39 1.42,0.76 -0.46,0.58 3.04,0.12 -1.17,0.41 1.49,0.85 -0.49,0.02 -1.23,0.41 0.92,0.26 -0.3,1.01 2.15,1.52 2.48,1.25 0.63,1.08 0.41,1.41 -0.1,1.83 -1.42,0.88 2.43,-1.21 0.72,0.34 2.06,-1.37 
0.58,-0.05 2.32,-0.32 -1.99,1.75 -0.59,0.5 0.78,-0.21 -0.83,0.96 3.44,0.91 4.93,1.94 -0.91,0.27 -0.18,0.2 -2.27,0.57 -1.84,0.6 -5.63,1.13 -1.65,0.57 2.38,-0.14 -3.16,0.98 -4.22,1.44 -2.7,-0.93 -0.89,-0.3 0.64,0.4 -2.33,1.52 3.75,-1.61 5.49,-1.72 1.43,0.01 -2.41,-0.46 0.42,-0.41 1.33,-0.34 3.51,1.41 1.11,1.71 1.48,0.47 2.57,-1.08 3.17,0.01 0.44,-0.43 2.91,1.71 0.64,1.58 1.75,0.56 -0.62,0.7 1,1.17 -2.22,0.27 -1.65,-0.28 0.39,0.74 -2.99,-0.77 1.74,0.68 -1.38,1.45 -2.37,2.12 -6.86,0.27 -7.89,2.97 -1.65,1.5 -6.45,2.06 -7.47,1.84 -3.18,-0.9 -6.5,0.01 -9.77,-0.48 -3.36,-0.48 -4.38,1.16 -6.78,2.81 -2.39,0.17 -3.24,1.57 -5.77,3.28 -1.99,-1.07 -5.18,-0.85 -1.58,-0.48 5.12,0.4 -6.41,5.83 -0.69,3.68 3.03,-2.92 6.6,-5.2 10.86,-5.86 1.82,-1.26 7.93,0.03 6.2,0.88 1.62,1.52 -3.47,2.53 -4.77,1.78 -4.06,0.15 1.22,0.69 2,1.16 3.73,-1.32 0.07,1.86 0.75,1.84 1.83,0.1 0.7,3.59 3.79,2.65 -1.76,0.9 3.54,1.02 3.53,1.41 1.87,-1.26 6.01,1.52 2.29,1.42 -2.69,0.95 -5.02,1.96 -6.51,1.47 -0.37,0.6 -2.85,2.58 -3.65,2.69 -1.04,1.1 -3.75,-1.22 -1.3,-2.57 -2.36,0.6 1.63,-0.33 0.76,-0.7 2.73,-1.8 3.52,-0.2 5.5,-1.39 1.65,-0.88 -5.93,0.77 -2.59,-1.05 1.32,-0.18 -1.05,-1.65 -0.69,0.14 -1.72,1.42 -4,0.4 -3.9,1.17 -2.12,0.93 -3.27,-0.09 -4.35,-1.4 0.03,-2.33 -0.19,-5.65 -3.46,-4.3 -2.45,-1.44 -3.52,4.25 -5.14,5.43 -2.9,1.15 -7.13,0.8 -10.76,0.87 -2.86,0.95 -5.35,2.75 -7.94,2.49 5.27,0.89 -3.8,-0.49 -5.56,1.57 -1.62,1.44 2.98,0.61 1.28,1.92 -1.93,-0.43 -4.04,0.88 -3.2,0.9 -3.13,-1.58 -5.29,2.34 -7.94,1.28 -0.31,-1.39 2.97,-0.16 1.48,-1.37 1.6,-1.89 3.6,-4.31 3.26,-6.92 -0.97,-1.69 1.66,1.48 1.9,1.01 1.71,1.67 2.32,-1.01 2.7,-0.39 -1.01,-1.37 -1.51,-2.43 -3.69,-3.12 -1.7,-0.13 -2.37,-0.91 -5.5,-0.7 -2.21,-0.29 -4.55,-0.92 -4.02,-2.13 -1.25,-0.79 -0.48,-3.83 -3.11,-2.77 -1.63,-1.3 -2.66,-2.91 -5.18,-2.61 -1.96,-1.73 -1.99,1.66 -2.83,0.8 1.4,-2.08 -2.07,1.92 -0.71,0.05 -2.26,2.27 -5.62,0.96 -8.19,1.03 -2.23,-0.69 -4.28,-1.2 -6.83,-1.39 -1.67,0.55 -2.07,-3.73 -2.84,-0.91 -25.35,0.01 -50.71,-0.04 -76.06,0.02 
-2.68,-0.23 0.16,-0.72 -1.56,-1.48 0.63,-0.24 -3,0.57 -1.83,-0.34 1.39,0.7 -0.1,-0.79 -0.32,-1.6 1.24,1.36 -2.46,1.77 -2,-0.42 2.07,-0.8 -2.61,1.02 -0.39,-1.08 0.32,-1.28 -1.91,2.58 -1.73,0.17 -1.48,1.22 -3.29,-0.16 -0.71,-0.12 1.57,-1.91 -1.47,1.23 -1.36,-0.4 -1.97,-0.13 0.79,0.11 -0.92,-0.51 -0.99,1.23 -4.7,-0.38 -1.06,-0.09 2.02,-0.15 -5.01,0.1 -1.14,-0.68 0.94,-0.02 -2.68,0.12 -0.33,-0.91 3.19,-0.33 1.8,0.01 -0.1,-0.34 -0.48,1.5 -1.85,-0.46 -0.45,-0.97 1.47,-1.47 2.44,0.87 2.2,-0.78 -2.45,0.23 0.84,-2.3 -1.41,-0.25 -3.01,1.39 -0.76,-1.05 -2.99,0.49 1.7,-2.23 -1.07,-1.48 -1.47,-3.57 1.57,0.74 4.05,0.59 0.74,0.17 -0.3,-1.19 0.68,-1.5 -0.94,-0.78 -1.22,2.51 -3.85,-1.63 -2.38,-0.97 2.73,-0.79 -2.3,0.68 -1.47,-1.41 2.99,1.86 -1.4,-0.85 1.53,0.03 -2.29,-0.51 2.4,-1.37 -0.15,-1.06 -0.32,-1.5 1.91,-0.87 0.29,-1.81 -0.41,3.34 -0.83,0.2 -0.77,-1.31 -2.81,-0.83 -5.59,-1.48 -6.98,-4.04 -1.8,-2.74 -5.22,-3.62 -7.04,-5.94 -2.86,-1.73 -4.34,2.92 -6.63,1.49 -2.57,-1.25 -4.45,-4.25 -6.79,-3.03 z M 221.95,34.56 c 2.71,1 5.66,0.09 8.24,1.27 -2.22,0.9 -9.4,-1.53 -5.2,1.22 1.53,0.7 2.86,0.59 2.55,0.77 0.85,0.02 3.21,0.3 2.68,-0.32 1.12,0.64 1.95,-0.71 1.86,0.68 1.49,0.87 4.22,0.59 2.18,0.28 3.62,0.1 7.47,0.79 10.93,0.47 -1.54,-0.51 -5.12,-1.38 -1.54,-0.59 1.34,0.63 6.02,1.49 2.2,0.35 0.08,-1.25 7.06,1.95 7.56,-0.38 -1.85,-1.2 -2.64,-0.17 -0.89,-1.44 -0.2,1.08 4.01,0.75 4.52,2.45 2.36,-0.76 -0.69,-0.06 1.66,-0.06 -1.72,1.35 2.73,-0.25 1,0.89 2.05,0.14 3.5,1.75 0.3,1.21 -2.45,1.88 2.27,0.12 3.42,0.41 1.54,-0.85 -0.26,0.19 1.01,0.3 0.43,0.41 2.55,1.08 2.17,0.23 0.79,-0.08 0.08,2.04 1.89,0.94 -0.33,0.35 5.21,3.53 0.97,4.11 1.44,1.85 5.43,-0.13 5.88,-0.08 2.46,1.07 4.84,1.21 3.7,1.53 1.81,0.54 -4.94,0.46 -4.75,1.06 0.01,-2.73 -6.31,-2.19 -8.61,-0.71 -1.09,1.08 4.69,2.44 0.63,2.21 -1.59,0.93 -6.76,0.13 -3.2,1.49 -1.08,1.55 0.2,-1.12 -2.6,-0.91 -2.86,-1.04 -3.86,0.32 -5.85,1.69 1.76,1.72 5.55,1.53 6.77,0.64 0.57,-0.23 4.81,0.61 3.08,-0.99 0.05,0.06 1.43,0.67 1.71,0.62 0.12,1.1 1.18,-0.27 
2.6,-0.01 -0.96,1.59 3.24,2.18 2.96,2.51 0.09,-0.66 4.07,0.75 0.7,0.77 -2.35,0.11 4.13,1.16 1.91,1.18 2.86,0.97 4.35,0.24 6.19,1.79 2.74,0.47 5.5,1.08 8.25,1.35 -0.11,-1.4 -3.05,-2.23 -4.52,-3.16 0.28,0.51 -6.01,-2.56 -1.86,-1.87 0.75,0.85 2.44,1.17 0.93,-0.21 2.43,1.5 3.49,1.89 4.08,1.85 0.64,0.73 1.16,-0.32 1.42,0.39 0.89,-0.66 3.78,2.4 1.81,-0.11 3.01,0.9 -0.41,-1 0.83,-1.64 -1.68,-1.57 2.26,2.56 1.18,-0.34 -2.02,-0.73 1.47,0.53 -0.06,-0.55 -0.74,0.15 -1.62,-0.58 -2.38,-1.1 2.33,-0.84 -1.46,-0.09 -0.68,-1.53 -1.39,0.67 -1.8,-0.3 -3.29,-0.56 1.09,0.07 -2.18,-0.92 -0.55,-1 -0.61,-0.17 -1.18,-1.48 -2.49,-0.34 0.66,-1.39 -1.83,-1.01 -0.41,-1.69 -3.28,-0.22 -0.55,-0.65 0.36,0.35 1.4,0.92 3.34,-0.39 0.73,-0.91 -0.34,-0.79 1.69,0.46 1.59,-0.39 -2.57,-0.4 1.89,-1.03 1.78,0.63 0.62,-3.6e-4 2.8,0.28 2.55,-0.25 -3.14,2.77 5.99,-1.54 2.2,0.69 0.2,-0.06 -3.5,1.07 -0.92,1.05 1.72,-0.54 -1.63,0.94 0.94,0.09 -2.62,1.42 2.76,0.13 -0.19,1.01 1.53,0.83 1.55,-0.56 2.22,0.61 1.17,0.58 2.78,-0.06 1.4,-1.24 1.62,-0.12 -1.32,-0.27 0.4,-0.69 -0.89,-0.48 1.85,0.54 1.79,0.4 0.67,-0.07 -0.68,-0.71 1.16,-0.73 -2.52,-0.99 -0.79,-0.18 0.82,-0.38 -1.27,-0.83 -3.37,-0.63 -0.79,-0.79 -2.45,-0.74 4.59,0.41 1.13,-0.3 2.25,-0.38 -2.45,-1.11 0.97,-0.46 1.55,-0.43 -1.67,-1.37 -1.27,-0.96 -0.95,-0.51 -0.81,0.6 -1.97,0.07 -0.18,1.16 -0.94,-1.11 -1.87,0.21 -2.87,0.77 3.39,-1.95 -0.02,-1.18 -1.5,1.27 0.78,-0.56 -1.58,-0.05 -3.98,1.04 0.11,9e-5 -0.62,-0.1 -1.35,0.09 -1.97,0.32 -0.19,0.02 -3.48,-0.35 2.89,0.16 -0.07,-0.52 1.73,-0.83 -1.75,-0.41 -0.64,-1.02 -1.8,0.47 -1.63,0.18 -1.1,-0.59 -1.8,0.51 -0.91,1.28 -2.16,0.13 -1.76,1.97 0.61,-1.73 -1.97,0.31 -0.71,0.48 1.07,-1.45 -0.6,-0.45 -0.14,-0.72 -2.05,-0.68 -2.02,-0.37 1.82,-1.02 -3.16,-0.02 0.29,-0.66 3.62,-0.25 -1.59,0.13 -1.2,-0.42 -2.23,0.35 -3.54,-0.45 -1.47,-0.47 -2.95,-0.13 -3.87,-0.62 -0.53,-0.23 3.06,0.22 -1.26,-0.43 1.24,-0.2 -3.57,-0.65 3.2,-0.18 -0.81,-0.82 -1.4,0.06 -3.08,0.56 -0.81,-0.05 -1.69,-0.24 3.47,0.06 -0.32,-0.43 -2.79,-0.31 3.01,0.39 
4.25,0.43 3.6,-0.32 -3.05,-1.18 -4.23,-1.1 -1.64,0.31 -5.3,0.06 -1.61,0.07 1.3,-0.57 6.18,0.08 5.48,-1.19 -2.02,-2.05 -6.01,1.39 -7.26,0.68 1.17,-0.37 5.27,-1.09 1.79,-1.36 -2.38,0.73 -3,1.41 -1.98,0.48 -0.04,-0.45 7.62,-1.24 2.53,-1.95 -1.79,-0.5 -5.98,1.51 -2.17,-0.25 -1.99,-1.75 -5.75,4.13 -3.67,0.87 -1.39,0.36 -3,0.73 -0.92,-0.28 2.35,0.43 3.3,-2.55 0.37,-0.87 -2.24,0.27 -3.61,1.29 -3.14,0.22 -1.43,-0.47 6.13,-0.74 2.1,-2.04 -2.47,-1.32 -4.97,2.51 -4.37,0.64 -1.21,0.07 -1.92,1.54 -0.58,-0.15 -0.29,-1.59 -2.71,1.19 -1.59,-0.17 -2.22,0.38 3.08,-1.33 -0.06,-0.85 -0.1,0.74 -1.76,-0.49 -2.05,1.28 -0.09,0.53 -0.27,-0.92 -0.62,-0.7 2.68,-0.88 -3.2,0.48 0.18,-0.36 -3.59,0.15 5.52,-0.88 0.49,-1.42 -1.76,0.39 -4.96,2.19 -1.67,0.37 -0.55,-0.71 -4.52,0.91 -1.06,-0.29 3.82,0.34 0.2,-1.94 -1.12,-1.17 -1.6,-1.42 -9.64,-0.02 -4.52,0.85 4.93,0.93 -4.8,-0.98 -1.85,0.48 1.22,0.45 2.74,1.09 0.5,0.32 1.89,1.25 -2.8,-1.31 -1.85,0.02 0.15,-0.46 -1.27,-1.66 -2.1,-0.86 -0.92,-1.7 -0.87,0.99 -0.75,0.25 -1.18,-0.77 -2.75,1.87 -2.85,0.28 2.17,0.26 -0.56,-0.31 1.3,-1.04 -2.36,-0 -2.3,0.98 -0.14,-0.53 1.4,-0.93 -1.83,-1.85 -2.94,-2.97 -2.09,-0.41 -6.81,0.29 -6.06,0.8 -1.51,-0.32 -5.69,0.56 -1.78,0.82 5.62,0.68 -6.33,-0.58 -1.39,0.53 1.79,0.35 3.42,0.63 0.69,0.19 -4.06,-1.18 -2.46,2.08 -0.25,1.35 1.59,-0.17 -1.23,0.49 1.24,0.8 1.01,0.88 -3.06,-1.56 -3.72,0.14 -2.2,-0.05 1.07,0.69 1,1 2.38,-1.22 2.32,3.18 0.62,1.73 1.64,-1.16 -7.5,1.08 -2.57,-0.17 1.14,-0.24 5.04,-0.29 1.75,-1.12 -2.5,-0.11 -2.64,-1.91 -3.1,-2.94 -2.31,-1.94 7.89,-3.35 2.28,-3.48 -3.25,0.13 -8.61,-0.33 -10.1,2.89 -2.02,0.44 -1.5,2.27 -1.83,3.77 z m 17.02,15.33 c -3.46,-1.07 1.37,1.92 1.69,0.82 -1.41,0.06 -0.8,-0.36 -1.69,-0.82 z m -0.03,9.98 c 1.39,2.91 8.62,-2.63 3.09,-1.16 -1.48,-0.19 -2.07,0.18 -3.09,1.16 z m -30.66,-27.28 c -1.81,0.47 2.85,0.93 2.9,-0.19 3.01,-1.16 -3.6,-1.76 0.63,-1.67 3.1,0.96 5.95,-0.21 6.97,-1.94 1.64,-0.5 3.5,-1.37 0.63,-1.39 -3.64,0.32 -7.1,-0.84 -10.81,-0.27 -3.63,-0.14 2.04,1.87 -1.22,1 
-2.17,-0.37 -1.37,3.3 0.12,3.74 -1.18,1.25 2.23,0.47 0.8,0.72 z m 25.75,23.64 c 0.29,3.94 4.97,0.02 6.58,-1 2.13,0.26 4.2,2.58 7.81,1.32 1.24,-0.34 -0.29,-1 -1.25,-1.17 -2.11,0.15 -2.67,0.67 -2.12,-0.99 -2.59,-0.83 -6.29,-3.41 -8.61,-2.21 -0.52,-1.61 -3.19,-3.22 -3.68,0.18 -0.48,2.35 0.11,3.46 -2.81,3.95 0.12,1.08 3.17,-0.11 4.08,-0.09 z m -45.17,-24.25 c 3.8,-0.46 6.48,3.63 10.14,2.74 0.93,-0.26 -1.44,-2.07 0.17,-0.81 2.26,0.66 5.32,-0.78 4.35,-1.07 0.38,-0.85 -1.56,-0.61 0.47,-1 -0.04,-1.67 -2.77,-0.05 -2.57,-1.6 -1.62,-0.13 -4.72,0.32 -1.32,-0.67 1.67,-0.31 2.15,-0.61 0.62,-0.84 3.92,-0.12 -0.43,-1.57 -1.59,-0.84 -1.45,0.8 -5.86,-0.74 -5.09,-0.04 1.1,-0.05 -5.32,-0.06 -1.53,0.68 -0.99,0.09 -4.41,0.02 -1.21,0.74 1.76,0.03 4.83,0.28 1.28,0.42 1.9,1.07 0.27,0.08 0.1,1.26 -1.76,0.95 -5.62,-2.67 -5.98,0.13 0.14,0.4 3.11,0.53 2.16,0.9 z m -14.12,-3.1 c 2.24,0.22 5.97,3.17 6.62,-0.29 -1.89,-0.62 -5.27,-1.3 -6.62,0.29 z m 29.22,14.16 c 1.91,-0.7 4.92,-0.87 1.8,-1.6 -1.15,-0.65 -5.13,-3.31 -6.77,-1.62 2.04,0.59 -2.2,-0.14 -0.12,0.74 -1.81,-0.18 -4.73,1.57 -1.3,1.12 1.42,0.55 4.22,1.16 6.4,1.36 z m 45.69,-12.48 c 3.56,0.26 7.37,0.17 10.86,-0.09 -2.46,-1.8 -5.96,-2.44 -9.17,-2.05 -1.83,-1 -6.3,0.4 -2.53,1.1 0.44,0.19 0.59,0.68 0.84,1.04 z m 17.77,13.71 c -1.55,-0.69 -4.85,0.08 -1.36,0.7 0.54,0.38 3.29,-0.48 1.36,-0.7 z m -9.77,1.64 c 2.22,2.37 8.93,-1.35 4.35,-2.54 -1.39,-0.2 -5.97,0.43 -4.35,2.54 z m -3.45,-5.48 c -1.79,0.27 -3.67,2.44 -0.69,1.01 0.21,0.13 2.11,-1.15 0.69,-1.01 z m -4.31,-0.3 c 5.15,-1.14 -4.52,-0.79 -1.08,-0.33 l 0.5,0.12 z m 2.74,-18.71 c -2.32,-0.15 -2.08,1.4 0.05,0.73 -1.8,-0.43 -0.14,-0.18 -0.05,-0.73 z m -27.39,-1.39 c 0.43,1.41 0.18,-1.87 0.97,0.6 2.23,-0.02 2.72,-0.23 4.93,-0.19 0.99,-0.29 3.8,1.1 6.18,0.38 -2.31,-0.54 -1.89,-0.54 -0.08,-0.38 0.27,-1.28 4.2,1.23 2.77,-0.63 0.74,1.1 4.92,1.1 2.36,-0.19 1.3,0.8 1.93,-0.3 1.69,0.7 1.92,-0.5 3.39,0.45 3.05,0.98 2.86,-0.68 6.45,-0.28 8.4,-1.97 -1.31,-0.94 -5.27,0.78 -3.52,-0.91 -1.71,-0.65 -3.41,0.59 
-3.84,-0.18 -1.52,-0.09 -7.18,-0.11 -2.83,-0.21 0.74,0.04 -2.86,-1.55 -0.18,-0.68 3.42,1.1 7.07,0.92 10.03,0.03 -0.04,-0.92 -2.59,-1.33 0.28,-1.16 1.63,0.32 8.82,-0.43 3.65,-0.56 -2.89,-0.26 7.18,-0 2.2,-0.85 -1.84,-0.04 -3.34,-0.32 -0.62,-0.21 1.92,0.69 5.12,-1.1 1.49,-1.04 -3,-0 -1.49,0.18 -1.61,-0.26 -1.45,-0.52 -7.94,1.17 -4.66,-0.02 -2.21,-0.28 -3.99,-0.11 -0.81,-0.12 1.76,0.12 7.78,-0.03 3.06,-0.3 -1.72,0.03 -5.1,-0.06 -1.51,-0.08 2.28,-0.63 7.24,1.14 7.93,0.18 0.4,-0.67 -6.5,-0.28 -7.74,-0.41 -3.17,-0.12 0.65,-0.38 1.46,-0.31 1.6,0.56 3.73,-0.16 0.84,-0.28 -3.45,-0.22 3.09,0.42 4.35,0.26 1.19,-0.16 7.87,-0.32 3.29,-1.04 -2.19,0.18 -2.73,-0.43 -0.25,-0.26 2.8,0.17 5.47,1.21 8.33,0.1 -3.29,-0.2 3.61,-0.39 -0.21,-0.85 -0.85,0.32 -5.31,0.31 -2.45,0.07 -2.95,-0.53 8.31,0.44 3.53,-1.3 3.49,1.42 6.85,-1.38 10.4,-1.6 1.58,-0.76 6.13,-0.33 5.94,-1.37 -3.32,-0.16 -6.7,0.43 -10.04,0.74 -2.98,0.61 -7.36,0.43 -2.64,0.08 1.95,-0.29 6.47,-0.55 6.8,-0.91 -2.4,-0.22 -5.78,0.49 -5.24,-0.2 -4.16,-0.76 3.11,0.64 4.59,-0.04 2.54,-0.07 4.72,-0.12 3.63,-0.22 4.22,-0.36 8.69,-0.25 12.67,-1.73 -1.97,-1.43 -5.27,-0.1 -5.6,-0.49 1.51,-0.47 -3.45,-0.69 -0.73,-0.78 -3.45,-0.49 -6.93,-0.07 -10.48,-0.08 -1.48,0.13 -6.05,0.61 -2.12,0.17 1.76,0.34 6.35,-0.62 2.28,-0.72 -4.28,0.14 -6.88,-0.27 -10.84,-0.3 -3.47,0.11 2.51,0.8 -1.24,0.45 -0.96,-1.2 -9.33,-0 -3.94,0.42 1.12,0.67 -3.67,-0.45 -4.99,-0.7 -1.87,0.32 -7.19,-0.53 -7.26,0.28 1.72,0.32 3.03,0.31 3.06,0.67 2.57,0.03 1.04,0.6 -0.63,0.52 1.18,-1.12 -6.23,-0.8 -4.76,-1.27 -1.66,0.31 -5.91,-0.46 -5.87,0.28 0.46,0.21 4.51,0.6 4.35,0.5 -2.02,0.34 -6.98,-0.82 -7.21,-0.16 5.35,1.05 -6.51,-0.45 -0.87,0.65 -1.68,-0.4 -4.39,0.47 -1.09,0.68 2.15,0.19 6.78,0.7 7.19,1.24 -3.19,-0.42 -6.77,-1.44 -9.8,-1.09 1.29,0.52 3.88,0.47 0.96,0.34 -2.88,-0.58 -7.8,-1.66 -8.62,-0.88 2.78,0.5 -5.6,0.08 -1.6,0.77 1.74,-0.31 5.95,0.84 1.87,0.43 1.18,0.42 -4.86,-0.7 -3.78,0.17 -3.31,-1.43 -5.85,0.41 -8.09,0.01 -1.79,-0.17 -8.35,1.18 -3.44,0.9 1.42,-0.36 6.26,0.04 
2.37,0.06 -2.81,1.45 4.19,-0.05 5.53,0.05 3.08,0.18 2.28,0.16 -0.17,0.07 -1.6,-0.23 -6.35,0.82 -2.42,0.8 1.74,-0.03 -5.74,-0.06 -1.53,0.8 4.08,-0.18 8.23,-0.07 12.24,-0.98 3.81,0.26 -4.02,1.3 -5.65,0.94 -1.55,-0.22 -7.83,0.01 -3.69,0.84 4.19,1.31 7.99,-1.72 12.23,-1.08 1.47,-0.54 7.39,-0.18 3.29,-0.18 -2.88,0.61 -7.5,-0.29 -9.37,1.75 2.81,-0.34 6.24,1.21 8.62,-0.74 1.94,-0.27 -1.29,0.74 1.19,0.28 1.33,-0.72 5.21,-0.13 1.76,-0.16 -4.69,0.67 1.96,0.93 3.56,0.42 2.34,0.16 5.74,-1.7 5.37,-1.5 2.11,0.22 2.88,-0.74 3.34,-0.7 2.52,-0.44 2.76,0.02 0.09,0.21 -1.03,0.17 0.24,0.23 -1.75,0.48 -3.29,1.57 5.86,0.16 4.43,0.92 -2.06,-0.06 -7.47,1.4e-4 -7.17,0.78 1.89,0.01 6.11,-0.05 2.11,0.19 -3.77,0.21 -7.58,0.08 -11.33,0.47 -1.3,1.07 5.45,0.64 3.66,1.82 2.07,0.7 7.61,-0.06 2.51,0.51 -3.64,0.26 -6.99,-2.43 -11,-2.06 -1.31,-0.21 -7.75,-0.14 -3.73,0.92 3.6,0.72 -0.04,-0.6 -1.19,0.33 2.38,0.2 6.66,1.96 7.65,2.28 -1.65,-0.54 -4.61,0.01 -1.2,0.14 1.73,0.3 7.25,0.04 6.54,-0.02 -1.28,0.73 -6.41,0.18 -2.45,0.63 1.98,1.12 -4.75,-0.87 -7.35,-0.42 -2.11,-0.32 -5.18,0.72 -5.56,1 -2.26,0.34 1.27,1.26 -1.02,1.15 1.34,0.86 5.84,-1.66 3.84,0.19 3.04,-0.14 4.24,-1.95 4.07,-0.42 3.38,0.47 -0.1,-0.5 -0.85,0.39 5.16,-0.27 -5.39,0.29 0.08,0.57 3.03,0.07 0.18,-0.59 -1,0.19 1.63,-0.44 -0.98,0.82 1.3,0.46 1.21,-0.39 -0.15,0.77 2.06,0.35 1.94,-0.05 6.84,-2.96 2.95,-0.42 -0.4,0.22 -3.18,0.54 -1.25,0.88 -3.4,0.36 -6.03,-0.21 -9.04,-1.41 -1.65,-0.49 -5.65,0.01 -2.57,1.37 2.47,0.02 1.21,-0.05 1.85,0.28 1.82,0.91 -6.32,-0.29 -6.6,1.5 -1.53,0.72 1.85,1.39 1.9,0.38 z m -12.51,-3.14 c -2.8,-0.02 -6.17,-0.78 -8.66,0.32 2.53,0.58 6.54,0.9 8.66,-0.32 z m -1.82,4.12 c 1.71,-0.98 6.9,1.12 4.27,2.72 1.17,0.75 2.53,2.95 4.68,1.28 -0.05,2.09 7.18,-0.38 6.39,0.5 1.45,1.54 5.25,-0.07 6.91,0.56 2.39,-0.96 4.95,1.12 7.37,-0.98 2.5,1.53 6.29,1.19 8.98,0.2 -2.05,-0.7 4.76,0.05 0.77,-0.71 -3.66,0.36 3.57,-1.08 -0.37,-1.34 -1.24,-0.38 -6.4,-1.58 -9.94,-0.84 -1.68,0.03 -7.01,0.4 -6.27,0.96 1.15,0.08 -2.11,0.59 -2.38,-0.08 
-2.27,-0.28 -4.74,-0.33 -6.02,-0.19 -1.03,0.01 -4.09,-0 -1.09,-0.56 -1.31,-0.92 -5,-0.36 -5.17,-0.68 2.33,-0.33 2.14,-0.21 0.57,-0.4 -4.8,-0.6 1.43,-0.23 1.54,-0.18 1.44,0.2 3.84,-0.17 0.94,-0.64 -1.35,-0.28 -6.33,-0.28 -2.36,-0.22 0.86,-0.64 -3.59,-0.82 -4.94,-0.46 -1.51,-0.65 -3.64,1.46 -2.2,-0.32 -3.15,-0.59 -6.64,-1.34 -9.82,-0.55 2.88,0.59 -2.14,0.27 0.77,0.78 0.94,0.34 3.95,0.11 1.28,0.43 1,0.78 5.24,0.32 1.96,0.63 1.36,0.23 2.75,-0.17 4.15,0.1 z m -2.52,1.8 c -1.67,0.15 -3.78,0.65 -2.96,0.86 0.25,-0.07 -3.05,1.2 -0.28,0.98 1.68,1.28 10.19,1.81 5.69,-1.29 -0.78,-0.33 -1.62,-0.49 -2.46,-0.55 z m -7.63,4.34 c -2.25,-0.08 -6.35,0.67 -3.12,0.8 0.94,-0.14 2.62,0.23 3.12,-0.8 z m -6.3,-2.43 c 1.95,-0.49 6.91,0.16 5.93,-0.76 -0.98,-0.36 -0.42,-1.07 1,-0.78 -0.7,-0.84 -1.17,-0.48 -0.45,-1.85 -0.44,-1.5 -4.66,-1.41 -2.97,-1.46 -1.84,1.17 -4.07,-0.63 -6.53,0.5 0.81,0.6 3.87,0.43 1.18,0.57 2.29,0.74 2.38,-0.08 1.44,0.79 1.94,0.66 -2.88,-0.43 -4.17,-1.21 -2.5,-0.37 -1.86,1.12 0.2,0.51 -2.51,0.49 0.26,0.71 0.11,1.16 3.11,0.63 -3.73,-0.55 -2.33,0.3 -3.66,0.88 3.03,0.71 4.25,0.37 2.03,-0.01 6.98,-0.52 2.4,0.04 0.61,0.34 -2.29,0.18 -0.3,0.29 -1.51,0.1 -0.95,0.24 -1.92,0.41 1.18,0.47 2.58,0.08 0.67,0.53 0.4,0.39 1,0.45 1.51,0.58 z m 8,-8.05 c 0.59,-0.63 3.62,0.21 5.55,-0.85 -3.24,-0.45 1.93,-0.65 -1.07,-1.05 -2.75,0.29 -5.61,-1.79 -8.38,-0.56 2.04,0.73 -1.78,0.08 0.61,0.95 0.63,0.38 5.09,0.7 1.38,0.69 -1.06,0.24 1.54,0.29 1.91,0.82 z m -22.65,-1.96 c 3.07,1.42 5.97,-0.12 8.83,0.63 2.41,-0.27 5.97,2.74 7.7,0.59 -1.74,-0.45 -1.52,-1.2 -2.01,-1.58 -2.37,-0.48 -5.02,-1.41 -7.65,-1.04 -2.3,-1.33 -5.88,-1.41 -8.24,-0.32 0.53,0.39 3.35,-0.06 1.54,0.82 1.6,-0.32 3.02,-0.53 2.88,0.17 1.79,-0.18 1.42,0.19 -0.07,0.31 3.68,0.42 -1.5,0.28 -2.52,0.27 z m 0.63,3.86 c 3.65,-0.13 -6.13,-3.42 -2.26,-0.72 0.63,0.55 1.51,0.51 2.26,0.72 z m -14.48,-2.28 c 2.73,-0.92 -3.86,-0.4 -5.02,-0.28 -2.04,-0.51 -6.71,0.96 -2.21,1.73 2.09,0.17 5.69,0.59 6.93,-0.85 -1.36,-0.04 -2.73,-0.65 -0.35,-0.58 z m 
-16.41,8.08 c 0.08,0.64 1.8,-0.2 2.61,0.13 1.2,0.81 2.85,-0.31 1.77,-0.71 1.61,0.24 1.21,-0.8 3.18,-0.34 -3.97,1.62 2.39,0.28 4.08,0.91 1.83,-0.86 3.62,-0.14 0.77,0.2 -1.82,0.23 -7.76,0.27 -6.94,1.29 4.16,1.61 8.39,-0.49 12.53,-0.89 3.24,-0.92 5.57,0.31 8.81,-0.22 4.38,-0.21 1.85,-4.77 -1.3,-2.84 1.73,1.03 -3.16,0.02 -2.86,0.11 0.88,-0.7 -2.53,0.02 -1.14,-1.26 -1.17,-1.88 -4.15,-1.17 -5.97,-0.03 1.67,0.5 4.13,0.73 1.22,1.39 2.13,-0.3 4.88,1.53 0.98,1 -2.52,0.58 -6.09,-0.95 -6.56,-1 0.85,-1.03 -4.96,-0.62 -6,-1.45 -1.53,-1.29 -7.51,0.23 -3.08,0.59 4.24,0.15 -7.21,-0.34 -2.67,0.93 1.44,-0.29 5.72,-0.06 1.95,0.12 -1.83,-0.32 -6.9,0.82 -2.38,0.81 2.03,-0.2 5.05,-0.63 1.55,-0.05 -2.06,-0.04 -6.99,1.04 -2.38,1.27 0.89,-0.15 1.15,-0.23 1.83,0.04 z m -4.54,-2.95 c -1.7,-0.11 -6.96,1.72 -2.4,1.7 0.6,-0.33 3.32,-0.6 2.4,-1.7 z m -6.46,0.79 c 2.43,0.56 -0.35,-2.6 2.02,-0.76 2.44,-0.46 -0.12,-1.35 2.26,-1.45 2.08,-1.56 0.32,1.94 3.27,0.87 0.85,-1.28 5.07,-0.49 2.59,-1.78 3.33,0.35 -2.38,-0.74 0.95,-0.79 2.88,-0.81 -4.56,-1.34 -3.15,-0.32 -3.97,-0.64 -7.71,0.62 -11.35,2.04 -1.63,0.42 -6.95,1.26 -3.67,1.37 -0.26,1.24 4.24,-0.02 4.98,0.16 1,0.29 0.75,0.76 2.09,0.67 z m -14.58,9.4 c -2.3,1.54 -1.01,1.55 0.54,1.65 3.42,0.13 6.1,4.07 9.46,1.34 3.81,0.68 5.01,-3.62 8.94,-3.85 2.34,-1.26 7.02,-0.58 8.27,-2.29 -2.96,-1.47 -8,-2.13 -9.93,-1.41 -1.54,-0.5 -2.05,0.53 -1.73,-0.27 -3.64,-0.98 -7.3,-0.82 -11.03,-0.58 -3.41,-0.58 -1.6,1.64 -0.6,2.18 -1.44,0.47 -3.24,1.27 -2.29,2.13 -1.83,-0.39 -0.58,0.41 -1.13,0.96 z m 42.07,-16.43 c 4.84,0.17 -2.13,-2.1 -3.6,-0.9 -1.48,0.21 -7.75,0.88 -3.46,0.84 2.41,0.09 4.68,0.21 7.06,0.06 z m 37.7,-4.58 c 2.59,0.46 -1.25,-0.02 1.43,0.8 1.38,0.45 7.38,-0.33 3.11,0.35 -1.88,-0.13 -3.02,0.14 -0.95,0.77 1.75,0.42 3.87,-0.67 3.34,0.09 1.68,-0.37 5.08,-0.8 4.62,-0.28 1.84,0.03 4.39,-0.01 1.12,0.08 -5.99,0.49 8.17,0.14 2.33,0.39 -2.42,0.32 -7.75,-0.34 -8.39,0.74 1.69,0.34 3.63,0.51 0.81,0.42 1.88,0.71 8.33,0.47 3.46,0.66 -2.71,0.42 3.58,1.14 4.97,1.15 1.26,-0.5 
0.71,-0.8 2.98,-0.22 1.56,0.64 -2.85,-2.13 -0.15,-0.66 1.16,0.93 3.43,0.83 3.11,-0.38 1.57,0.29 2.33,0.35 1.21,-0.66 1.78,-1.99 -0.26,1.35 2.43,0.32 1.27,-1.67 7.19,-0.69 6.09,-1.91 -1.7,-0.84 -5.18,-0.2 -5.85,-0.62 2.82,-1.14 -2.02,-0.3 1,-0.83 -1.49,-0.81 -2.62,0.15 -1.91,-1.21 -1.43,-0.96 -3.51,-0.15 -2.11,0.57 -2.84,0.07 -1.14,-1.36 -4.36,-1.19 -3.03,0.01 -5.63,-2.64 -9.3,-2.54 -1.73,-0.44 -4.4,0.47 -1.16,0.52 4.81,0.36 -4.41,0.29 -0.62,0.44 -2.55,-0.24 -5.52,0.56 -2.85,0.78 0.98,0.09 3.49,0.28 1.02,0.29 5.34,0.82 -4.69,-0.06 -3.4,0.74 1.92,0.57 -4.53,0.16 -0.94,0.63 1.71,0.25 2.27,-0.16 2.51,0.36 1.16,0.33 4.81,-0.46 1.7,0.09 2.52,1.15 -6.92,-0.96 -4.83,0.22 z m -9.72,0.3 c 1.19,0.3 5.95,0.59 2.61,-0.65 -0.72,0.02 -2.82,-0.62 -2.61,0.65 z m 55.89,67.48 c 1.53,-0.33 0.9,-0.42 1.03,0.1 1.65,-1.15 0.91,0.32 1.58,-1.38 -0.59,2.99 1.24,-1.59 -0.84,-0.05 -1.3,1.74 0.63,-1.57 -1.05,0.46 l -0.44,0.4 z m -135.84,9.1 c 0.95,-1.81 0.57,1.37 1.15,-0.93 -1.24,-2.31 -2.12,1.15 -1.42,-0.37 1.49,0.27 -1.42,0.57 0.27,1.3 z m 5.07,7.51 c 0.64,0.57 4.07,0.48 1.67,1.68 3.17,-0.62 1.37,2.05 4.79,0.91 0.71,-0.85 -2.16,1.57 0.36,1.37 2.35,0.91 5.3,0.72 2.46,-1.11 -2.43,-1.05 -4.67,-4.86 -8.34,-4.36 -1.02,-0.82 -6.32,-1.3 -3.1,0.2 1.37,-0.69 1.6,-0.21 0.18,0.41 -0.28,0.95 2.41,0.35 1.99,0.89 z",SV:"m 222.03,196.98 c 0.88,-1.85 3.05,-2.65 4.44,-0.35 3.04,-1.49 2.33,3.55 -0.45,1.68 -0.11,-0.05 -3.19,-0.56 -3.99,-1.33 z",GY:"m 301.66,218.91 c 0.49,-1.79 1.12,-2.46 2.95,-3.04 -2.84,-1.04 1.95,-3.71 1.27,-3.93 2.24,1.68 -0.88,-0.82 1.59,0.77 2.39,0.75 2.17,4.25 2,4.69 1.27,-2.76 5.15,1.96 3.7,3.65 -2.72,-0.18 -3.01,4.7 -0.29,5.18 -10e-4,2.18 4.29,5.02 0.07,3.89 -2.38,0.99 -4.44,3.28 -6.58,0.57 -1.44,-1.81 -0.19,-4.74 0.04,-6.63 -0.61,-1.32 -1.37,-1.38 -1.21,-3.1 -1.67,0.2 -2.75,-0.41 -3.56,-2.05 z",BE:"m 478.93,91.9 c 2.32,-1.15 4.47,-0.04 6.06,-1.1 1.51,0.32 3.18,0.75 3.06,2.03 3.04,0.69 -0.46,2.11 -0.34,3.56 -2.48,-0.6 -2,-2.13 -4.33,-1.33 0.56,-1.8 -4.26,-1.27 -4.45,-3.16 z",GQ:"m 
498.59,231.17 c 0.57,-0.61 -0.03,-2.96 2.29,-1.55 3.75,-1.68 3.3,5.15 -0.4,3.16 -0.91,0.03 -4.04,0.11 -1.89,-1.6 z",LS:"m 546.8,319.01 c 0.71,1.9 2.91,4.13 4.08,1.42 4.63,-0.16 1.3,-6.51 -1.79,-3.51 -0.99,0.45 -1.02,1.91 -2.29,2.09 z",BG:"m 533.97,112.48 c 1.59,-2.91 1.02,1.06 3.37,-0.12 3.37,1 6.48,-0.11 9.72,-0.88 2.31,0.28 5.98,1.47 2.27,2.81 -0.57,1.91 -1.71,1.69 0.14,3.21 -1.73,-0.14 -4.89,-0.3 -5.01,1.49 -2.34,1.77 -4.87,-1.4 -7.43,0.27 -2.24,0.89 -1.01,-2.22 -3.11,-2.68 -0.02,-1.82 2.99,-2.17 0.28,-3.6 z",BI:"m 552.39,243.34 c 0.66,-0.13 2.99,0.31 2.86,-1 2.1,-0.75 0.41,1.93 2.05,1.88 -0.8,1.38 -2.12,4.99 -3.98,3.28 -0.86,-1.35 0.21,-2.98 -0.93,-4.15 z",DJ:"m 587.83,204.6 c -0.8,-2.73 3.53,-6.12 4.51,-3.27 -0.46,1.17 -4.12,2.23 -0.91,1.75 1.34,1.79 -2.93,1.73 -3.6,1.52 z",AZ:"m 596.08,123.95 c 1.37,-0.12 4.67,1.29 3.26,2.25 -1.32,-0.22 -2.65,-1.04 -3.26,-2.25 z m 0.72,-4.49 c 1.06,-0.89 6.64,1.92 3.42,-0.83 1.16,-2.58 3.98,3.43 5.82,-0.11 2.25,-1.16 2.64,3.87 5.38,3.54 -0.25,-0.15 -3.6,0.66 -2.8,3.06 -0.52,1.23 -1.34,-0.09 -1.11,2.38 -2.4,0.12 -2.56,-1.81 -1.48,-2.65 -1.92,-2.61 -5.89,3.52 -5.2,-0.31 -0.9,-0.69 -3.39,-1.11 -1.41,-1.99 -1.93,-0.6 -1.07,-2.48 -2.18,-2.59 l -0.09,-0.14 z",MY:"m 776.03,229.79 c 0.94,1.19 3.66,1.54 4.54,1.91 -1.52,-0.7 0.64,-1.85 -0.2,-2.82 0.71,0.24 1.44,-2.13 3.47,-1.6 3.2,0.14 3.49,-7.1 6.03,-3.04 0.76,-0.77 0.48,-3.7 1.3,-1.03 1.04,0.24 -0.67,-1.97 1.23,-2.07 -0.71,-1.32 2.8,-2.95 3.42,-5.2 -0.23,1.83 1.43,-0.64 2.14,1.28 1.13,1.03 -1.26,2.58 1.24,1.51 -0.72,1.4 1.68,0.18 2.51,1.76 3.12,0.67 -3,0.96 -1.88,1.86 2.74,1.53 -1.92,0.86 -2.26,1.45 -1.8,-0.34 -4.97,-1.38 -5.12,1.37 -0.46,2.54 -1.76,3.45 -2.6,5.97 -2.13,1.99 -5.98,-1.42 -7.81,1.64 -2.57,0.56 -6.2,0.31 -6,-2.98 z m -26.46,-12.24 c 0.81,-1.87 3.14,1.48 2.99,2.15 0.86,-1.4 2.33,0.75 2.47,-1.61 3.54,1.64 4.14,5.64 3.78,9.14 1.49,1.41 2.95,5.27 1.51,4.14 -2.37,0.91 -5.07,-2.51 -7.43,-3.83 -0.75,-2.57 -2.09,-3.59 -2.65,-6.4 -0.03,-1.26 0.13,-2.5 -0.67,-3.59 z",PH:"m 
820.11,207.51 c 0.82,1.58 -0.23,-2.89 0,0 z m -3.49,-7.05 c 0.99,1.44 3.32,3.35 0.62,3.2 -1.3,-0.61 0.15,1.59 0.72,1.89 0.16,3.43 1.86,0.36 0.58,-1.65 2.72,2.14 1.73,-1.54 1.01,-2.78 -0.18,-1.27 -2.04,-0.65 -2.93,-0.66 z m -2.51,7.05 c -1.05,4.34 3.36,-4.32 1.39,-2.87 -0.2,1.08 -0.99,1.88 -1.39,2.87 z m -0.55,-5.64 c 0.23,-1.55 2.22,1.26 2.48,0.23 -1.22,-1.33 -2.47,-3.02 -2.48,-0.23 z m -1.69,7.23 c 2.83,2.93 1.01,-2.32 2.62,-3.92 -2.35,-1.67 -1.28,2.68 -3.2,2.71 l 0.19,0.64 z m -1.84,6.41 c 0.78,1.98 2.03,-3.59 2.71,-1.05 0.9,-0.8 1.16,1.09 1.97,-0.84 2.89,0.63 -0.14,6.61 4.16,5.12 0.3,0.06 0.81,2.53 1.59,-0.01 -1.3,-1.4 0.21,-5.55 1.13,-2.03 0.43,2.55 0.28,-0.8 0.74,-0.69 1.78,-2.11 -1.11,-3.92 -0.11,-5.38 -0.33,-1.64 -3.69,-3.74 -2.16,-0.4 -1.91,-0.53 -1.87,1.23 -3.48,1.66 -0.94,1.41 -2.07,1.73 -1.41,-0.27 -1.34,-1.08 -3.18,1.53 -4.58,2.3 0.03,0.58 -0.28,1.11 -0.57,1.58 z m -0.15,-12.99 c 1.37,1.3 -1.23,5.28 2.16,2.82 2.7,-2.22 0.02,-1.86 -1.84,-3.29 z m -5.75,-12.27 c 0.17,2 1.5,6.03 2.43,3.8 2.23,0.61 -1.82,2.67 0.72,2.78 0.63,0.68 3.87,-1.27 4.8,1.72 -0.26,-1.32 -0.91,-3.18 0.93,-0.96 0.43,1.22 2.78,1.88 2.56,1.94 0.3,2.23 1.13,-1.46 -0.36,-1.02 -1.03,-1.16 -0.4,-1.39 0.4,-1.67 -1.58,-0.77 -1.61,-0.73 -2.22,0.11 -0.91,-2.69 -2.4,-0.92 -3.06,-0.55 -1.48,-1.61 -2.71,-5.38 0.33,-6.48 0.05,0.11 1.99,-2.95 0.09,-3.94 1,-2.9 -0.05,-1.62 -1.57,-2.04 -3.66,-2.61 -3.34,3.01 -3.47,5.26 0.66,3.49 -1.7,-1.12 -1.58,1.06 z m 1.47,7.51 c 1.09,1.87 3.92,5.29 3.14,0.86 -0.53,-1.08 -2.14,-1.26 -3.14,-0.86 z m -8.51,13.72 c 0.2,1.1 4.23,-2.57 4.46,-3.92 2.15,-0.13 2.43,-3.15 1.62,-3.68 -0.27,1.18 -0.08,1.27 -0.56,1.34 -0.2,1.79 -1.78,1.81 -2.8,3.74 -0.85,0.86 -2.18,1.29 -2.71,2.52 z m 12.81,5.73 c 1.89,1.01 0.24,-1.57 0,0 z",UY:"m 311.36,321.52 c 1.41,-2.04 4.89,-0.87 5.47,1.5 1.9,-0.63 4.87,2.28 6.66,3.89 1.83,0.85 -0.82,2.69 0.03,4.25 -1.55,3.07 -4.83,2.34 -7.67,2.58 -2.09,-0.87 -8.29,-1.8 -5.1,-5.16 -0.91,-2.46 0.84,-4.7 0.62,-7.05 z",CG:"m 502.78,246.68 c 0.46,-1.75 
3.43,-0.13 1.65,-2.27 -0.84,-1.77 -0.21,-2.37 2.04,-2.28 -0.05,-3.58 2.35,2.6 3.6,-0.63 1.31,2.9 2.21,-2.62 2.08,-3.94 -1.87,-0.89 -2.27,-3.03 -0.2,-4.28 -0.2,-3.41 -4.84,0.65 -3.36,-3.12 1.95,-2.02 6.63,0.75 8.01,0.25 0.34,-2.92 1.85,-6.38 5.5,-4.76 3.6,0.03 -1.21,4.29 -0.3,6.41 -0.05,3.99 -1.97,6.86 -4.7,9.61 0.43,3.55 -2.12,6.63 -5.2,7.7 0.46,-3.72 -2.66,1.03 -3.68,-0.77 -1.35,-1.81 -3.39,3.15 -3.85,-0.45 -0.43,-0.58 -1.02,-1.01 -1.57,-1.47 z", RS:"m 527.66,115.24 c 1.53,-1.25 3.09,-1.55 4.56,0.4 -0.18,1.45 -3,2.01 -3.18,1.86 -0.05,-1.09 -1.47,-1 -1.68,-2.1 m -0.84,-9.68 c -2.48,0.14 -2.86,2.04 -0.82,2.79 -2.61,0.83 1.29,0.24 -0.71,1.91 -0.45,0.81 2.48,1.6 0.37,1.44 1.92,1.69 -2.56,1.04 0.33,2.32 1.77,1.82 3.34,-0.1 4.34,0.67 2.54,0.13 0.42,2.83 3.17,1.73 1.9,-0.05 0.7,-1.8 2.51,-2.46 -3.46,-1.24 0.89,-5.32 -2.67,-3.79 -2.5,-0.59 -0.92,-2.05 -3.85,-2.59 0.63,-1.14 -1.77,-2.19 -2.66,-2.01 z",ME:"m 523.1,115.89 c 0.25,-1.66 1.44,-3.47 2.89,-1.55 2.94,0.62 1.38,1.64 -0.11,1.93 0.18,2.71 -1.86,0.38 -2.22,-0.09 0.92,-0.03 -0.85,0.25 -0.57,-0.29 z",EE:"m 537.06,70.85 c 0.6,-1.04 1.34,-0.13 -0.09,-0.76 -0.13,-1.61 2.75,-1.72 4.84,-1.83 1.93,-0.52 7.06,0.13 7.88,0.44 -2.59,0.76 -1.03,3.56 -1.05,4.25 -2.65,2.53 -6.13,-2.06 -9.3,-0.16 1.53,-2.48 -0.87,-0.23 -1.83,-1.72 z m -2.39,-1.43 c -3.01,-0.05 0.01,1.64 0.91,0.45 -0.19,-0.33 -0.67,-0.19 -0.91,-0.45 z m -1.65,3.27 c 0.87,-1.16 5.33,-1.53 1.65,-1.89 -3.01,0.38 -1.52,0.02 -1.65,1.89 z",RW:"m 552,242.64 c 0.4,-2.57 3.25,-3.61 4.42,-3.65 1.72,1.72 1.16,4.36 -1.45,3.13 0.19,2.38 -2.84,0.96 -2.97,0.52 z",AM:"m 592.45,119.98 c 2.56,-0.33 5.77,-0.93 5.47,1.17 0.15,0.96 2.85,1.67 0.42,1.97 2.04,0.44 4.06,3.16 1.45,2.96 -0.61,-2.51 -2.88,-1.32 -4.53,-2.8 -4.29,-0.22 -0.87,-1.32 -2.81,-3.3 z",SN:"m 423.83,194.19 c 0.12,-0.09 2.64,-3.19 2.94,-4.92 2.88,-0.61 7.06,-0.8 8.5,2.4 1.83,1.52 3.8,3.4 3.27,5.66 1.46,0.88 3.16,4.02 -0.27,3.51 -2.77,-0.42 -5.35,-1.28 -8.53,-0.81 -1.93,1.34 -6.57,0.51 -2.35,0.14 1.63,0.26 2.32,-0.91 
0.58,0.04 -1.62,-0.22 -4.09,0.03 -1.61,-1.6 2.45,-0.5 4.14,-1.3 6.73,-0.4 0.8,-0.66 -2.59,-1.21 -3.7,-1.32 -2.29,1.53 -4.4,-0.28 -3.54,-0.69 1.42,-1.28 -1.46,1.75 -0.92,-0.96 l -0.47,-0.65 z",TG:"m 471.48,204.3 c 2.11,0.43 3.17,0.03 2.83,2.29 2.39,1.59 2.1,5.22 2.19,7.95 -0.53,1.66 1.21,4.69 -1.64,3.54 -2.84,-1.32 -0.4,-4.57 -1.44,-6.53 -0.4,-0.76 0.18,-2.86 -0.71,-2.83 0.99,-1.66 -0.9,-3.13 -1.23,-4.41 z",ES:"m 481.01,124.75 c -3.23,1.01 -1.72,-2.19 -0.44,-1.14 0.66,0.42 1.44,-0.02 0.44,1.14 z m -34.51,-9.69 c -1.12,-1.61 3.86,-1.24 2.45,-1.98 2.63,-0.74 5.81,0.24 9.04,0.28 3.37,0.18 6.58,0.01 9.89,0.57 0.69,1.2 4.87,2.08 5.98,1.12 1.69,1.81 6.14,0.75 6.81,1.78 -0.36,3.24 -6.37,2.32 -7.21,4.71 -1.76,1.25 -3.38,3.65 -1.23,5.28 -3.13,1.3 -1.31,3.64 -4.82,3.61 -1.54,3.44 -6.17,0.78 -8.96,2.58 -1.53,1.81 -3.61,1.22 -4.39,-0.67 1.04,-0.9 -2.14,-1.43 -2.93,-1.86 -0.2,-1.79 2.55,-1.96 0.46,-3.16 2.46,-1.43 -2.21,-3.58 0.79,-3.38 0.67,-1.89 0.25,-4.13 2.15,-5.44 -1.02,-2.01 -5.14,0.52 -5.33,-1.24 -0.81,-0.18 -2.94,0.94 -1.19,-0.8 -1.06,0.5 -0.05,-0.05 -0.72,-0.56 0.34,-0.26 -0.71,-0.18 -0.43,-0.6 l -0.2,-0.07 z",GA:"m 496.4,238.12 c -0.6,-0.74 1.77,0.08 1.29,-2.52 -0.32,-1.18 3.34,0.41 0.71,-0.82 -1.45,-1.4 0.92,-0.16 0.33,-2.07 2.47,0.48 6.01,0.73 4.55,-2.89 1.57,-1.14 6.76,-1.49 5.26,1.87 0.18,0.66 4.28,-0.8 3.14,2.12 -3.42,2.04 1.38,3.08 0.44,5.63 -0.1,0.9 -1.19,4.99 -2.04,2.09 -1.61,3.01 -3.29,-2.85 -4.08,0.77 -2.37,-0.58 -2.23,1.44 -1.11,2.62 0.67,2.35 -1.95,-0.57 -2.11,1.79 -1.37,-1.68 -4.07,-4.42 -3.38,-4.01 -1.66,-0.56 -1.55,-2.01 -1.3,-1.68 -0.96,-0.41 -1.3,-2.68 -0.49,-0.72 1.96,0.14 -1.03,-0.93 -1.2,-2.16 z",HU:"m 516.57,103.77 c 2.11,-0.35 0.34,-2.99 2.43,-2.38 1.54,-1.19 4.7,0.77 6.02,-0.83 1.53,-0.8 3.17,-0.28 4.4,-1.58 1.7,0.3 5.87,0.26 5.56,2.12 -3.03,0.15 -3,4.63 -6.02,4.69 -3.59,-0.54 -7.33,2.83 -10.43,-0.55 -0.9,-0.17 -1.03,-1.52 -1.96,-1.47 z",MW:"m 562.64,274.16 c 0.55,-1.54 1.02,-3.52 1.93,-3.87 -1.59,-2.89 1.41,-4.69 -0.21,-6.99 -2.72,-2.02 
1.67,-0.22 2.39,-0.63 1.41,1.41 1.32,4.73 1.73,5.49 -2.65,2.36 -0.17,5.96 2.15,7.65 1.58,2.22 0.74,5.14 -1.29,6.37 1.59,3.5 -2.26,-0.57 -2.26,-2.16 1.69,-2.24 0.19,-4.57 -2.08,-3.73 -0.59,-1.02 -1.84,-1.13 -2.34,-2.12 z",TJ:"m 658.73,125.13 c 1.18,-1.28 4.56,-0.9 4.21,-2.37 -1.87,-0.4 2.45,0.45 1.19,-1.61 1.26,0.02 5.5,-1.48 2.96,0.74 1.05,1.01 2.39,0.38 0.21,1.41 -1.41,-2.67 -5.48,2.23 -1.02,1.02 1.77,0.59 4.14,-0.45 6.02,1.07 1.94,-1.37 4.89,-0.84 4.35,1.59 2.44,-1.12 3.96,2.41 3.17,3.83 -1.95,-0.91 -3.11,0.52 -4.41,-0.44 -2.4,1.75 -6.63,2.78 -5.15,-1.37 -0.71,-0.18 -1.43,-2.98 -3.1,-0.56 -0.59,1.42 -0.71,1.64 -2.7,1.79 0.25,1.79 -2.81,-0.1 -3.84,1.52 -1.76,-1.7 1.84,-3.13 0.32,-5.05 0.18,-1.17 -1.73,-0.61 -2.23,-1.56 z",KH:"m 755.76,198.21 c 0.59,-3.27 5.73,-3.72 7.96,-2.65 1.54,0.74 2.61,1.15 2.58,-0.36 0.93,-0.6 3.89,-0.27 3.75,-0.38 -0.99,1.74 1.79,5.8 -1.09,6.48 -1.92,0.98 -1.78,1.07 -3.53,1.67 0.39,1.22 1.9,2.95 -0.33,1.71 -1.91,0.06 -3.33,2.31 -5.24,1.16 -1.45,0.92 0.24,-2.88 -1.42,-0.89 -1.22,-0.76 -0.38,-2.35 -1.48,-3.16 0.32,-1.4 -1,-2.27 -1.2,-3.58 z",KR:"m 822.96,129.52 c 0.55,0.4 1.63,-2.7 3.79,-1.66 2.05,-1.89 3.71,2.6 4.27,4.45 0.43,2.34 0.11,4.94 -2.71,4.64 0.09,1.13 -2.54,-0.58 -2.41,1.11 -0.67,-0.96 -0.84,1.65 -1.11,-0.18 -1.09,1.41 -2.53,1.09 -2.41,0.33 1.32,-0.25 -1.3,-1.14 0.86,-2.65 -1.8,0.25 1.81,-1.21 -0.11,-0.92 1.21,-0.17 -0.79,-1.02 -0.24,-1.92 -1.84,-0.48 0.09,-1.84 0.89,-0.5 0.43,-0.92 -0.31,-1.19 -0.81,-2.7 z",HN:"m 224.11,195.03 c 0.55,-2.12 3.29,-4.64 5.8,-3.75 3.09,0.04 4.62,-1.16 7.72,-0.16 2.35,0.74 1.48,0.96 1.03,1.19 0.14,0.13 1.39,0.51 1.63,0.41 -0.9,-0.61 2.59,1.32 -0.25,1 -2.54,0.7 -3.88,0.03 -5.35,2.23 -1.06,0.91 -3.73,0.28 -3.8,2.4 -0.9,1.88 -2.04,-1.1 -2.35,-0.67 0.17,-2.12 -3.09,-0.68 -4.43,-2.65 z",IS:"m 405.62,51.63 c 2.16,-0.72 4.6,-0.3 4.87,-0.3 3.05,-0.07 -3.66,1.31 0.45,1.08 0.6,0.6 -3.4,0.12 -4.54,0.62 -3.06,0.86 4.9,-0.38 3.77,1.05 1.24,0.15 2.59,-0.84 0.71,0.43 -0.23,0.35 0.11,0.02 1.38,0.02 -1.11,0.56 
-1.87,0.98 -3.46,1.16 1.65,0.86 5.37,-0.27 6,0.57 0.56,0.2 5.43,1.94 7.36,0.32 1.74,-0.34 5.05,-1.16 7.59,-1.9 2.1,-0.2 2.25,-1.4 3.58,-1.59 -0.35,-0.14 1.01,-0.17 -0.66,-0.4 1.02,0.32 2.79,-0.46 0.57,-0.46 2.34,-0.37 -1.51,0.03 0.73,-0.68 -0.59,-1.36 -3.35,0.61 -1.9,-1.01 -2.62,0.82 0.43,-1.03 -2.22,-1.01 2.25,-1.38 0.06,-0.05 -1.29,-0.27 -1.94,-2.31 -2.55,1.21 -4.65,0.17 -0.86,1.48 -4.09,-1.44 -2.19,1.52 -1.07,-2.17 -3.68,-1.57 -4.19,-0.29 -2.21,-2.41 -1.78,0.14 -2.68,0.63 -1.63,-0.67 -1.58,1.8 -2.3,0.13 -1.44,-1.35 -0.37,0.03 -0.02,-1.27 0.3,-1.04 -7.13,-1.94 -3.28,-0.97 0.94,-0.19 -2.39,0.24 0.11,0.57 0.64,1.46 -3.73,-1.44 -2.37,0.21 -2.91,-0.64 1.75,0.89 -1.02,0.28 0.62,0.58 0.96,0.31 1.21,0.62 -1.59,-0.14 -2.67,-0.42 -1.44,0.41 -1.53,-0.52 -2.19,0.25 -0.11,0.35 z",NI:"m 228.97,198.89 c 2.22,1.54 1.82,-2.21 3.14,-1.96 1.8,-0.48 3.25,-0.98 4.5,-2.96 1.86,1.45 5.3,-2.12 4.28,0.65 -0.25,1.86 -0.79,4.26 -0.92,6.21 -0.06,-2.95 -0.25,1.59 -0.43,1.07 -0.08,1.24 0.94,4.96 -1.84,2.87 -3.7,0.58 -5.7,-2.53 -8.45,-4.88 -0.24,-0.2 -0.57,-0.68 -0.29,-1 z",CL:"m 276.37,385.59 c -0.05,-1.26 1.39,-0.84 0.65,-1.59 1.59,0.13 5.04,-1.84 4.57,1.54 0.28,3.88 0.23,5.47 -3.52,4.19 -0.61,-0.05 -2.89,0.29 -1.99,-0.31 -1.32,-0.12 -2.45,0.16 -3.87,-0.54 0.81,0.32 2.1,-0.39 3.66,-0.26 -1.03,0.77 3.05,0.44 0.34,-0.18 -2.36,-1.03 2.7,0.75 2.19,0 1.41,0.25 1.64,1.19 1.82,0.15 -1.76,-0.03 -4.75,-1.9 -1.19,-2.27 1.7,-1.56 -2.52,0.22 -2.66,-0.71 z m 0.26,-98.36 c 2.02,0.25 1.75,-4.14 3.23,-1.04 0.67,2.13 2.81,4.91 1.41,6.76 2.08,1.77 0.96,7.45 4.58,7.14 1.13,3.77 -4.88,3.71 -4.01,6.44 -0.2,1.91 0.59,4.02 -0.02,5.39 -2.29,1.49 -4.86,5.58 -3.56,8.57 -1.57,0.99 -2.42,4.09 -1.59,5.24 0.87,2.47 1.44,3.61 1.51,6.1 -2.23,1.13 -1.01,4.3 -2.21,5.7 -2.53,1.3 -1.36,4.75 -0.67,6.83 -2.18,1.13 -2.17,3.47 -2.36,4.99 -1.25,1.89 0.52,4.42 -1.11,5.57 0.01,2.29 1.19,2.52 1.22,4.43 -1.71,1.55 3.78,1.37 0.43,2.25 -3.22,-0.51 2.82,1.52 -0.57,2.37 0.34,1.34 -0.01,1.58 0.07,3.02 -0.61,1.46 -2.67,2.58 -1.64,4.61 
-0.92,1.62 -4.95,3.2 -2.6,5.74 -0.14,2.05 3.62,-0.36 2.65,2.43 -0.47,3.82 6.22,1.32 8.79,2.7 3.36,0.62 -1.14,-0.41 -2.06,0.91 -2.43,0.02 -3.41,1.26 -3.24,3.58 -1.39,1.23 -5.33,-1.82 -3.06,-1.16 -0.68,0.34 1.72,-0.97 0.55,0.44 1.42,-0.91 3.15,-3.11 0.31,-1.56 -2.07,0.42 -1.23,0.09 -2.01,0.85 1.44,1.92 -4.32,-0.94 -0.61,-0.22 -2.27,-2.3 6.73,-1.24 1.42,-1.92 -1.04,0.64 -0.6,-0.68 -1.92,0.43 1.36,-0.03 -1.56,1.89 -1.2,0.55 0.06,-0.49 1.71,-1.5 0.13,-0.76 -1.35,1.4 -0.79,-1.71 -0.7,-1.26 2.17,0.51 1.54,-0.41 2.66,-0.09 -0.44,3.05 1.46,-2.61 -1.65,-1.74 2.46,0.16 1.67,1.45 -0.24,0.34 1.61,1.45 -0.76,0.42 -0.01,0.85 -0.55,0.96 -1.01,-0.22 -0.6,-0.65 -2.03,-0.12 -0.19,-0.88 -0.41,-0.81 0.36,-0.88 -2.69,-0.9 -0.39,-1.76 0.93,-0.49 0.61,-1.34 -0.61,0.07 -1.22,-1.17 1.08,-0.85 -0.96,-1.29 -2.23,-1.65 3.6,0.7 0.23,-0.81 0.76,-0.65 1.68,0.2 0.18,-0.78 0.06,-0.26 0.08,-1.16 1.53,-0.22 -2.34,-0.58 0.83,-3.46 -1.59,-0.6 -1.78,-1.82 1.42,-2.25 -0.36,-2.59 3.47,0.33 -2.49,-2.26 0.43,-1.15 -0.1,-1.19 4.23,1.11 1.71,-0.74 2.06,1.56 -0.88,-2.02 -0.68,-0.25 -2.66,0.18 -2.16,-0.88 -1.02,-0.42 1.18,0.03 -2.05,-1.18 0.26,-1.08 0.88,-0.99 -0.62,0.01 0.16,-0.9 -1.58,-1.09 -1.28,0.64 -2.57,-0.96 0.74,-0.86 -2.96,0.44 -1.14,0.62 -1.94,0.01 1.62,-1.89 1.86,-2.17 -1.22,0 -0.43,-0.85 0.83,-0.49 1.36,-0.61 1.81,1.23 -0.1,0.86 2.31,-0.08 1.35,0.39 1.99,0.86 0.77,-0.64 1.44,-1.95 0.22,-0.6 0.29,-1.86 1.88,-1.77 0.37,-1.87 0.92,-1.46 3.32,-0.04 0.33,-1.04 0.62,-1.23 4.04,-2.35 0.68,-2.97 0.43,-1.15 0.86,-1.04 0.37,-2.49 1.07,-1.47 0.63,-2.47 1.12,-3.09 1.33,1.3 0.21,-1.02 -0.45,-1.02 2.67,-1.44 0.6,-0.53 -0.81,-0.73 -2.55,2.03 -3.01,-3.42 -0.91,-4.9 1.79,-2.07 -1.93,-6.4 0.04,-7.44 1.43,-1.34 1.51,-3.41 3.03,-5.53 1.21,-3.42 2.91,-6.61 2.07,-10.33 -1.29,-2.93 2.03,-4.84 0.3,-7.64 2.01,-2.99 1.42,-7.08 2.93,-10.02 0.06,-3.01 -0.89,-5.37 0.45,-7.38 0.71,-3.26 0.45,-6.62 -0.05,-9.87 -0.4,-0.74 0.29,-1.76 -0.36,-2.34 z m -6.78,74.23 c 0.56,-1.88 -2.75,-0.01 -1.07,0.52 l 0.56,-0.17 z m -1.96,-8.14 c 
-1.8,-0.98 -3.3,4.48 -0.93,4.4 1.56,-1.42 -0.04,-2.28 1.05,-3.75 z m -3.57,22.95 c 1.89,-0.19 0.91,-1.17 0.82,-3.19 -1.3,-1.25 -0.81,0.99 -1.43,1.06 -0.68,0.31 -1.56,0.59 0.28,1.59 0.73,-3 0.68,0.64 0.33,0.55 z m 0.61,-3.67 c 0.49,-1.39 -0.84,-1.39 -0.53,-0.75 -1.24,0.1 -0.13,0.98 0.53,0.75 z m -1.6,0.03 c 0.75,-2.37 -2.08,-1.32 -0.39,-0.87 0.23,0.25 -0.02,0.69 0.39,0.87 z m 0.53,7.83 c 1.72,0.13 0.88,-1.72 0,0 z m 2.21,-1.79 c 0.43,-1.09 -2.4,-1.19 0,0 z m -1.46,0.76 c 3.02,-1.15 -1.99,-0.87 0,0 z m -1.63,-0.91 c 1.5,-1.81 -1.25,-0.65 0,0 z m 0.8,-1.39 c 0.78,-1.24 -0.56,-0.17 -0.87,-0.44 -0.26,0.69 0.43,0.93 0.87,0.44 z m 4.34,8.49 c 1.83,0.02 -3.39,-1.61 -3.22,-1.3 0.98,0.8 2.22,0.74 3.22,1.3 z m 9.71,5.08 c 1.02,0.7 2.43,1.2 1.75,0.05 1.18,0.28 3.17,1.85 3.46,0.99 -1.07,-0.7 -2.24,-0.5 -0.26,-0.61 -1.89,-0.56 -2.83,-0.44 -0.48,-0.73 -1.32,-0.58 -3.38,0.33 -4.47,0.31 z m -1.47,0.22 c 1.86,-0.63 -1.32,-0.36 0.45,-0.87 -1.44,0.44 -2.68,-0.18 -0.45,0.87 z m -0.07,-2.73 c 0.23,-1.03 0.35,-2.08 -1.05,-1 0.04,0.55 0.53,0.94 1.05,1 z m -2.1,-0.61 c -3.24,-0.16 2.74,1.87 0,0 z m -1.09,-0.2 c -2.37,-0.68 -0.37,1.88 -0.39,0.54 0.69,0.25 0.1,-0.34 0.39,-0.54 z m -3.2,0.54 c 3.23,-0.71 0.16,-1.7 -1.01,-2 -1.43,0.42 1.49,0.88 -0.77,0.48 -1.27,1.1 3.85,0.62 1.21,1.4 z m 16.09,2.7 c -1.18,-1.36 -5.24,-0.27 -2.03,0.39 0.09,-0.66 1.54,0.87 2.03,-0.39 z",MA:"m 435.34,157.82 c 3.51,-1.58 8.67,-3.15 9.9,-7.6 -1.92,-2.47 1.17,-6.42 3.2,-8.19 3.79,-0.7 6.09,-3.66 7.2,-7.08 2.19,-0.65 3.38,3 6.18,1.58 2.19,-0.14 3.29,-0.27 5.09,1.74 -0.08,1.96 1.14,5.01 1.62,6.42 -0.43,1.66 -4.65,-0.6 -6,1.76 -2.79,-0.04 0.86,2.65 -2.45,2.75 -2.89,1.88 -5.76,2.93 -8.95,3.73 -2.19,0.78 -3.99,2.27 -3.3,4.82 -4.14,0.35 -8.33,0.06 -12.49,0.08 z",LR:"m 440.03,216.14 c 1.83,-1.24 2.39,-4.03 4.44,-4.43 2.22,0.14 0.61,5.84 3.27,2.46 2.46,0.33 -1.11,4.3 2.27,3.96 2.12,1.34 1.66,7.23 -1.53,4.32 -3.12,-1.85 -5.71,-4.25 -8.45,-6.31 z",NL:"m 482.62,90.46 c 1.95,0.97 -0.52,-0.48 2.14,-0.3 1.14,-0.06 -3.78,-0.2 
-0.57,-1.79 0.55,-2.7 8.79,-4.99 7.11,-0.85 -1.71,0.01 0.69,1.12 -0.61,1.98 -2.17,0.18 -2.33,0.02 -1.76,1.94 0.3,1.35 -2.06,2.03 -0.81,0.3 -2.1,-1.48 -4.93,-0.13 -6.42,-1.21 l 0.29,-0.07 z",CF:"m 511.86,218.62 c 1.77,-2.21 2.85,-4.56 5.73,-5.01 1.22,1.22 4.09,-0.82 6.16,-0.93 1.69,-1.38 -0.16,-2.94 3.02,-2.46 3.37,-0.04 4.4,-3.88 6.79,-5.2 3.27,-1.11 4.41,4.22 3.67,5.65 0.28,0.52 2.21,0.5 2.35,1.77 2.68,0.6 2.73,3.31 5.43,4.47 -0.45,1.76 3.81,3.45 2.66,4.3 -2.17,0.78 -4.86,-1.52 -6.36,0.42 -2.09,-0.85 -4.18,1.49 -6.17,0.83 -1.28,2.77 -6.09,0.99 -7.68,-0.78 -2.93,-1.92 -4.44,3.1 -4.31,3.91 -2.96,-0.58 -6.01,-0.26 -6.14,3.39 -0.3,0.07 -0.9,-2.78 -2.47,-3.26 -1.22,-2.01 -2.17,-4.62 -2.58,-6.92 z",SK:"m 518.91,98.85 c 2.51,-0.63 5.04,-3.35 7.51,-2.21 2.17,0.85 5.54,-0.96 7.91,1.06 -1.09,3.67 -5.16,-0.25 -7.14,2.45 -2.96,-0.08 -5.13,2.79 -8.14,-0.1 -0.1,-0.39 0.02,-0.81 -0.14,-1.2 z",LT:"m 530.27,77.87 c 3.05,-1.8 6.58,-0.36 9.84,-0.7 1.85,-0.36 7.4,2.36 5.18,3.35 -2.43,0.07 -1.76,2.97 -2.5,2.64 -2.47,0.5 -5.42,1.45 -7.68,-0.54 0.63,-3.74 -5.3,-0.28 -4.84,-4.74 z",ZW:"m 541.96,285.73 c 2.74,0.77 5.77,1.04 7.14,-2.41 3.04,-0.69 3.3,-4.86 7.14,-3.72 1.14,1.68 5.13,2 7.14,3.02 -0.44,2.08 0.95,4.75 -0.74,6.34 1.42,1.95 -0.04,3.83 -0.8,6.08 -0.79,2.91 -4.6,4.41 -7.82,3.01 -1.93,-1.33 -5.66,-1.22 -5.41,-4.73 -2.57,-1.13 -5.49,-4.18 -6.64,-7.58 z",LK:"m 693.19,214.25 c -1.23,4.36 3.86,6.27 5.71,2.37 0.06,-3.36 -1.74,-4.91 -3.65,-7.84 -2.19,-1.63 0.81,0.69 -1.39,-0.75 -0.85,0.54 3.19,1.28 0.23,0.77 -0.38,1.5 -0.88,4.98 -1.19,3.97 -0.03,0.52 0.24,0.98 0.29,1.48 z",IL:"m 566.95,147.85 c 1.69,-1.7 1.42,-5.7 3.8,-5.71 1.34,3.02 -2.94,1.31 -1.6,3.99 0.15,0.56 -1.06,2.06 1.16,0.9 -0.44,1.53 -1.37,8.03 -2.13,3.85 -0.53,-0.96 -0.8,-2.03 -1.22,-3.03 z",LA:"m 749.45,178.39 c 1.02,-1.22 1.78,-2.83 3.12,-3.15 1.11,2.39 2.6,-0.56 1.04,-2.12 1.25,-2.09 3.1,1.91 3.89,1.63 -0.97,2.9 2.55,2.37 3.97,2.31 0.59,1.1 0.04,1.11 1.49,2.11 -0.53,1.53 -4.67,1.18 -1.64,2.82 2.73,0.82 3.04,3.56 5.51,5.17 
0.43,2.43 3.48,2.66 2.68,4.57 2.36,1.37 -0.81,4.58 -2.32,2.97 -1.92,0.35 -0.3,2.73 -2.87,1.18 -0.88,-0.47 0.94,-2.86 0.23,-3.61 0.57,-1.96 -3.33,-2.69 -2.12,-5.69 -1.55,-2.5 -3.98,-3.46 -5.69,-1.23 -1.67,-3 -5.88,3.86 -4.29,-0.92 0.59,-2.17 0.5,-4.24 -2.02,-4.08 0.79,-1.19 -0.1,-2.42 -0.98,-1.97 z",KP:"m 816.84,122.85 c 2.59,-1.48 5.26,-2.72 7.31,-4.69 0.86,1.47 5.12,1.43 3.03,-0.63 2.63,0.4 4.45,-1.79 5.8,-2.67 2.59,2.04 0.14,1.73 -1.31,3.76 0.92,3.33 -4.08,3.75 -6.09,5.33 -1.11,1.84 1.86,1.45 2.29,3.44 -1.5,1.01 -3.8,-0.13 -4.84,1.89 -1.84,0.14 -2.7,-0.88 -3.21,0.14 0.39,-0.52 -1.55,-0.36 -0.73,-0.93 -2.55,0.34 0.91,-2.31 1.1,-1.75 -2.6,0.02 1.52,-3.15 -1.87,-2.73 0.01,0.5 -1.79,-0.5 -1.45,-1.17 z",GR:"m 545.17,126.04 c -1.44,-0.12 -1.9,-1.54 0.25,-0.61 0.92,0.89 -0.13,0.09 -0.25,0.61 z m -6.34,10.56 c -3.34,-0.05 -0.37,-2.12 0.68,-0.43 1.73,-0.29 5.08,0.32 4.88,0.94 -2.12,0.27 -3.69,0.25 -5.56,-0.51 z m -1.48,-9.18 c -0.69,-1.04 -2.82,-1.58 -0.46,-1.36 1.55,0.16 4.59,4.29 1.65,1.57 -0.35,-0.18 -0.9,0.16 -1.19,-0.22 z m -8.51,0.26 c 2.24,1.03 -1.41,1.08 0,0 z m -1.41,-3.66 c 2.05,-0.05 2.05,-4.11 4.6,-3.45 2.38,-1.45 5.12,-0.88 7.76,-1.89 1.35,1.16 4.93,1.21 4.87,-0.43 2.5,0.73 -0.92,4.05 -2.73,2.24 -1.49,-0.14 -5.95,0.56 -2.92,1.66 1.81,1.58 -3.07,-1.43 -0.56,0.99 -0.98,-0.49 -2.57,-1.08 -0.83,0.16 -1.52,-0.38 -3.14,-3.48 -2.86,-0.2 1.16,1.09 2.41,3.54 0.78,1.83 -0.6,0.79 1.08,1.42 -0.98,1.31 1.52,0.94 4.64,0.97 4.02,3.5 -1.81,-1.95 -3.74,-0.36 -1.59,0.63 -1.18,0.51 -2.83,-1.15 -1.28,1.18 1.3,3.13 -1.11,-0.93 -1.39,1.49 -0.75,-1.26 -1.3,-1.77 -2.09,-1.03 0.25,-1.99 -3.16,-3.34 0.09,-4.11 1.16,0.66 5.81,1.14 2.4,-0.11 -1.57,-0.44 -4.82,0.91 -5.22,-1.74 2.97,0.28 -1.98,-0.52 -2.05,-2.04 z",TM:"m 617.39,118.21 c 2.91,-3.16 6.68,-0.85 9.06,1.25 2,-0.16 4.6,1 3.87,-1.84 1.02,-1.07 4.14,-1.36 3.4,-1.45 -0.27,-1.6 4.14,0.52 4.74,1.36 -0.37,2.12 2.58,2.39 4.69,2.11 1.64,3.69 4.97,5.76 8.74,7.21 1.42,1.64 6.38,0.8 4.33,3.74 -2.75,-1.31 -4.57,1.15 -5.97,3.21 -3.12,0.45 
-2.77,2.05 -5.58,2.97 -1.73,-1.19 -3.96,-1.24 -3.1,-4.16 -3.28,0.38 -4.88,-3.4 -8.24,-3.13 -2.82,-1.64 -5.28,-1.49 -8.31,-0.63 -1.37,1.94 -4.87,2.62 -3.74,-0.96 1.59,-2.44 -3.48,-3.29 -1.37,-3.57 1.62,-0.1 -0.89,-1.71 -1.74,-1.3 0.12,-1.81 1.08,-3.34 -0.79,-4.81 z",EC:"m 247.34,241.79 c 0.56,-1.35 0.41,-4.3 1.59,-4.54 -0.23,-1.3 1.79,-2.25 0.9,-3.85 2.19,-0.37 3.44,-2.62 5.64,-0.29 1.79,0.8 3.64,2.47 5.35,1.47 1.38,1.31 2.48,1.02 1.69,1.84 2.06,2.8 -2.75,6.82 -6.09,7.42 -2.67,0.7 -1.82,6.03 -4.71,5.36 -0.68,-1.98 -4.7,-0.73 -2.12,-2.81 -0.74,-1.59 1.65,-2.89 1.07,-5.11 -0.88,2.01 -0.14,0.99 -1.14,1.43 -0.11,1.34 -1.7,-0.51 -2.16,-0.92 z m 2.63,1.34 c -1.83,1.11 0.51,0.96 0,0 z m -32.07,-7.51 c 2.44,0.88 -1.18,3.78 2,2.57 0.3,-0.63 -1.01,-3.21 -2,-2.57 z",BJ:"m 474.11,205.49 c 0.64,-2.71 4.8,-1.26 4.62,-4.35 2.63,-0.84 4.19,3.7 3.22,5.34 -0.27,2.61 -3.28,3.79 -2.53,6.89 0.52,2.5 0.56,5.54 -3,4.76 -0.07,-2.01 0.47,-6.02 -0.42,-8.64 0.25,-2.12 -1.84,-2.22 -1.88,-4 z",SI:"m 509.32,106.07 c 0.74,-0.2 -0.9,-1.4 0.83,-1.36 2.87,1.35 6.07,-2.33 7.79,0.18 -2.08,-0.27 -3.04,2.13 -3.97,2.88 -1.55,-1.27 -4.75,1.19 -3.91,-0.87 -0.71,-0.11 -0.23,-0.5 -0.75,-0.83 z",NO:"m 485.82,62.84 c 1.26,-0.31 3.08,-0.04 0.79,-0.26 1.08,-0.49 -1.94,-0.11 0.31,-0.42 -2.73,-0.44 1.77,-0.61 2.54,-0.58 2.96,-0.13 -3.4,0.05 -2.8,-0.32 0.18,-0.21 -1.19,-1.24 0.51,-0.14 -0.08,-1.13 3.05,0.62 1.67,-0.79 1.3,0.53 1.37,-1.1 2.56,0.3 -0.34,0.37 1.88,-0.53 -0.51,-0.78 -2.77,-0.03 0.17,-0 -0.91,-0.23 -1.15,-0.92 4.72,0.65 2.98,-0.4 2.37,-0.32 1.02,-0.03 -0.53,-0.15 2.37,-0.31 -2.71,0.23 -0.41,-0.24 -1.95,-1.52 6.06,1.08 2.37,-0.39 -0.46,-0.81 2.72,0.79 0.78,-0.16 -0.54,-0.18 0.32,-0.58 1.65,-0.55 -2.26,-0.71 3.37,-0.25 0.63,-0.7 2.72,-0.69 1.59,0.96 3.94,0.08 1.32,-0.25 -0.46,-0.58 2.03,-0.94 -0.86,-0.59 0.82,-0.83 -0.41,-0.8 -2.29,0.72 0.68,0.45 -1.89,1.28 -2.18,0.93 -1.04,-0.98 -1.87,-0.36 -2,0.03 2.16,-0.67 0.46,-0.77 1.65,-2.29 5.41,-0.44 4.07,-1.96 1.06,-0.19 0.73,-0.91 2.73,-0.54 -1.33,-0.24 
-4.9,0.82 -2.23,0 -1.27,-0.01 0.67,-0.28 0.88,-0.6 1.91,-0.35 -0.95,0.49 1.42,0.09 2.26,-0.94 -0.76,0.24 -0.5,-0.61 -1.44,-0.76 2.85,0 0.29,-1.01 0.56,0.15 1.66,-1.02 1.99,-0.79 -3.53,-0.71 5.72,-0.93 1.35,-0.94 -2.63,0.49 0.03,-0.21 -0.97,-0.15 -2.11,-0.8 3.27,-0.55 -0.19,-0.97 1.21,-0.03 -0.25,-0.25 1.69,-0.19 -1.98,-0.16 1.79,-0.11 -0.69,-0.54 1.29,-0.53 3.4,-0 2.48,-0.74 1.64,-0.23 4.96,0.02 1.35,-0.35 -2.63,0.16 0.76,-0.49 -0.08,-0.73 0.18,0.17 3.04,0.75 2.27,-0.03 -3.82,0.08 2.74,-0.95 -1.72,-0.34 -2.44,0.43 1.74,-0.48 -0.75,-0.43 0.35,-0.01 5.19,-0.57 1.31,-0.54 0.83,-0.93 3.01,0.03 2.66,0.31 -0.19,-0.22 2.1,-0.43 0.06,-0.61 0.73,-0.28 -1.14,-0.71 1.36,-0.05 -2.96,-1.35 2.82,0.09 2.15,-0.69 2.17,-0.11 -5.74,0.1 -1.42,-0.83 2.36,0.29 -0.05,-0.37 1.87,-0.17 -1.1,-0.4 1.91,-2.29 2.33,-1.5 2.88,0.47 -2.03,-0.96 1.02,-0.36 2.6,0.67 0.2,0.31 0.52,-0.27 -0.36,0.29 -0.21,-1.03 1.65,-0.76 -0.92,2.87 1.93,-2.06 1.63,0.51 -2.83,1.89 2.37,-0.36 0.96,0.08 0.67,-1.45 1.02,-0.35 2.54,-0.88 2.11,1.14 1.05,-0.53 0.67,-0.46 -4.2,-0.81 2.66,-0.25 1.5,-0.5 1.56,-0.3 1.82,0.72 0.1,0.55 1.62,-0.21 3.8,0.73 2.37,-0.46 0.51,-0.18 3.93,-1.1 3.42,-1.5 -0.84,-0.62 4.06,-0.76 3.55,-0.03 -2,-0.06 -2.62,3.07 -0.8,1.13 1.27,-1.29 4.18,-1.93 3.05,-0.85 0.08,1.6 1.9,-0.07 2.23,-0.36 1.63,-0.17 -1.82,-0.38 0.53,-0.66 1.45,-0.88 4.14,0.8 0.91,0.71 1.69,0.19 -1.86,0.61 0.84,0.26 0.39,0.46 -2.03,0.69 0.52,0.55 -1.21,1.59 2.05,-2.74 2.68,-0.49 2.12,-0.11 2.92,0.19 4.66,0.99 -1.47,1.05 -6.54,0.26 -5.78,0.68 2.79,0.32 1.98,1.46 4.58,0.78 2.52,0.96 -1.66,0.18 -2.23,1.2 -1.23,0.67 -3.88,1.43 -1.48,-0.43 -2.59,-1.43 -7.34,-2.38 -9.52,0.4 -0.81,3.51 -5.16,0.84 -7.65,2.03 -2.68,-0.31 -4.71,-2.68 -6.62,-1.11 -2.13,-0.47 -0.16,0.84 -1.93,1.38 1.27,1.67 -5.73,-1.77 -5.14,1.27 -2.3,-0.22 -6.17,1.15 -4.97,3.04 -1.87,1.2 -3.41,2.53 -4.9,2.95 1.17,2.75 -4.41,3.41 -1.14,4.9 -2.33,0.59 -6.57,0.68 -5.72,4.14 0.71,1.64 -0.32,3.59 2.18,3.87 -0.48,1.36 -2.55,1.05 -0.79,2.71 -0.75,1.79 -3.15,1.78 -2.61,4.23 
-1.65,-0.06 -3.3,-1.91 -2.68,-2.73 -0.44,1.06 -1.26,1.03 -0.85,1.82 -2.91,0.76 -2.77,1.61 -6.29,3.04 -1.51,0.83 -4.6,0.12 -4,-0.19 -3.57,-0.14 -3.22,-2.92 -1.57,-1.96 3.17,-0.69 -1.7,0.39 -0.07,-0.73 2.73,-0.65 -2.07,0.06 1.27,-1.03 -1.4,0.45 -2.04,0.14 -1.83,0.63 -1,-0.98 -1.25,1.04 -1.45,-0.77 0.61,0.23 -0.14,0.15 1.61,-0.14 -0.48,-0.38 1.39,-0.2 -0.64,-0.45 1.16,-0.92 2.5,-1.84 2.68,-1.25 2.87,-2 -2.26,0.61 -2.59,0.33 0.21,-0.74 -2.15,0.38 -1.05,-1.04 1.41,0.37 0.99,-0.89 -0.44,-0.22 -1.36,-1.08 2.3,-0.14 -0.5,-0.98 -0.02,-0.36 4.94,-1.06 5.19,0.03 0.68,-0.38 2.26,-0.56 1.62,-1.31 0.43,-0.23 -1.2,1.23 -1.35,0.52 -0.78,-0.42 -6.28,1.02 -4.57,-0.44 1.71,-0.1 -0.99,0.12 -0.92,-0.13 z m 58.18,-26.95 c 1.4,-0.42 -3.26,-0.53 -0.61,0.12 z m -7.07,0.64 c -1.13,-0.1 -1.93,0.41 -4.02,0.38 -1.04,0.42 3.23,0.58 4.02,-0.38 z m -12.74,3.08 c 0.69,-0.98 -1.04,0.02 -1.39,0.06 0.27,-0.41 -1.24,1.38 1.39,-0.06 z m -5.05,1.12 c 0.92,-0.07 -1.34,0.93 1.02,0.37 3.71,-0.07 0.34,-1.81 -0.11,-0.84 1.43,0.21 -2.1,-0.12 -0.91,0.47 z m -4.35,1.58 c -0.36,0.38 -1.74,1.62 0.24,0.71 -0.45,1.34 5.44,-1.86 1.36,-0.69 -0.61,1.24 0,-1.89 -1.16,-0.11 l 0.23,0.1 z m -1.49,0.33 c 3.52,-0.83 -1.42,-1.19 -0.91,-0.12 1.28,-0.2 1.46,-0.52 0.91,0.12 z m -3.59,1.17 c 0.03,0.68 2.78,-0.85 0.05,-0.35 0.45,0.42 -0.75,0.04 -0.05,0.35 z m 20,-26.23 c 1.3,0.65 6.35,-1.22 4.36,0.4 1.8,0.27 5.88,-1.01 6.15,-1.4 -0.88,-0.35 -5.46,0.11 -3.2,-0.94 -1.86,-0.19 -5.96,-0.41 -6.72,0.29 2.63,0.3 0.29,1.07 -0.6,1.64 z m -6.29,-6.99 c -3.2,0.5 3.52,1.05 4.83,0.56 1.93,-0.27 7.79,0.02 2.96,0.31 -1.05,-0.01 -3.58,-0.05 -1.09,0.23 -1.45,-0.02 -5.49,-0.27 -2.02,0.45 3.57,0.43 6.3,-0.3 8.6,0.78 3.9,-0.08 6.43,-0.98 9.59,-1.82 2.52,-1.37 -3.26,-1 -4.41,-1.29 -3,-0.64 -3.17,0.19 -6.22,0.31 1.44,-1.86 -2.02,-0.77 -2.25,0.49 -1.74,-0.8 -1.66,-0.54 -4.15,-0.9 -1.83,-0.94 -3.16,-0.83 -2.83,0.02 -2.44,-0.38 -0.91,0.06 -1,0.53 -1.9,-0.54 -4.83,-0.28 -3.25,0.09 0.33,0.22 2.75,0.09 1.23,0.24 z m -20.56,2.17 c 1.43,0.89 1.1,-0.97 2.49,0 
-2.27,0.33 1.45,0.91 -1.14,0.67 -3.69,0.56 6.63,1.44 1.44,1.39 1.84,1.17 7.55,0.74 6.04,-0.63 1.64,0.22 3.04,-0.75 2.54,0.55 2.3,0.1 4.53,-1.15 3.41,0.1 3.93,0.29 -3.43,-0.03 -4.61,0.87 -1.97,0.48 -2.26,-0.11 -3.99,0.62 3.09,1.01 6.56,-0.07 9.79,-0.05 0.24,1 -6.32,0.02 -5.92,0.87 1.73,-0.02 5.43,0.79 1.74,0.25 -1.88,-0.45 -6.66,0.09 -2.25,1.16 1.45,0.41 7.32,0.15 3.01,0.52 -1.21,0.56 4.19,1.65 3.74,0.18 1.03,-1.41 3.41,-2.01 4.35,-3.3 1.63,-0.1 0.89,-1.78 3.66,-1.56 1.49,0.13 6.64,-0.54 2.61,-0.84 -2.92,-0.48 -5.6,-0.41 -6.96,-1.93 -0.79,0.3 -3.04,0.67 -1.18,-0.4 -3.75,-1.72 -6.96,10e-4 -4.33,2.1 -1.55,-0.66 -5.56,-3.79 -6.28,-0.8 -1.96,-0.68 -2.15,-0.85 -4.29,-0.82 1.67,-0.05 6.21,-0.84 1.93,-0.68 -2.82,0.37 -4.57,-0.1 -6.49,0.69 0.91,-0.1 -1.62,0.23 0.7,1.03 z m 2.37,2.41 c -1.6,-0.8 -6.01,-2.2 -2.23,-0.37 0.19,0.02 3.62,1.26 2.23,0.37 z",MD:"m 545.77,99.88 c 2.16,-1.57 4.18,0.57 5.96,0.53 1.43,0.26 2.23,2.93 1.96,2.89 2.88,1.16 0.75,2.71 -0.8,1.4 -0.73,1.37 -3.41,5.07 -3.05,1.08 1.27,-2.27 -2.61,-4.76 -4.07,-5.9 z",LB:"m 569.26,142.56 c 0.93,-1.57 2.6,-6.43 4.07,-3.4 -0.89,1.41 -2.1,3.21 -4.07,3.4 z",NP:"m 694.09,154.14 c 0.44,-1.98 1.75,-3.37 3.45,-3.38 2.26,-2 5.02,3.31 7.74,2.77 1.15,2.11 3.46,1.87 4.73,2.94 1.1,0.54 2.98,0.38 5.37,0.84 1.99,-0.52 0.5,2.96 0.69,3.95 -2.76,0.44 -5.03,-0.77 -7.68,-0.9 -2.02,-2.49 -4.77,-1.25 -7.1,-2.59 -2.56,-0.64 -5.72,-2.18 -7.2,-3.63 z",ER:"m 572.96,193.02 c 1.46,-3.19 1.28,-6.2 5.2,-7.02 2.35,-1.7 2.05,5.09 3.76,6.81 0.36,0.9 0.66,-2.05 1.19,0.41 2.35,0.45 4.82,3.36 6.16,4.47 1.19,1.37 3.37,3.11 0.05,2.85 -2.3,-2.41 -4.43,-6.15 -8.29,-5.72 -0.95,-0.46 -2.9,0.78 -4,-1.06 -0.53,1.56 -0.73,2.6 -2.01,1.23 -1.79,1.42 -2.11,-0.3 -2.05,-1.98 z", US:"m 39.4,68.72 c 2.12,0 5.18,-0.56 5.03,-1.27 -1.37,0.24 -4.62,-0.06 -5.03,1.27 z m -2.83,3.6 c 1.56,1.95 3.92,0.16 0.44,-0.05 z m 93.97,55.56 c -2.1,-1.68 -2.06,-4.52 -3.45,-6.43 1.29,-1.2 -0.18,-3.87 -0.51,-5.8 0.03,-2.69 1.18,-2.04 1.07,-4.81 0.64,-2.31 0.08,-5.98 2.07,-5.16 
-1.83,-0.29 -2.6,-0.64 -1.21,-1.37 -2.05,-0.06 0.71,-1.42 -0.98,-0.82 -0.61,-1.79 -3.25,-5.33 0.69,-3.45 2.66,0.08 4.48,0.44 2.04,2.21 0.91,0.69 0.04,-1.37 1.69,-1.13 -0.1,1.81 -0.8,1.18 -1.33,2.02 1.29,0.4 2.83,-2.18 1.4,-3.55 -1.06,-0.23 0.75,-0.82 -0.68,-1.65 4.14,-0.26 8.43,-0.05 12.63,-0.12 21.22,0 42.45,0 63.67,0 0.91,-2.58 1.69,1.99 3.95,1.21 2.54,-0.61 3.95,0.79 6.01,1.34 1.49,-0.47 5.36,-0.02 5.2,0.42 -2.03,0.48 -5.87,2.25 -6.06,3.48 1.45,0.16 3.57,-1.22 3.22,0.14 2.55,0.31 6.38,-2.29 7.03,-0.57 2.84,1.09 5.88,0.96 9.07,0.22 -0.19,1.54 2.5,0.4 2.58,1.82 -0.46,1.52 -5.21,-0.99 -7.18,1.42 0.41,-1.34 -2.3,-0.14 -2.59,1.75 -1.95,1.22 -0.74,1.2 0.68,0.46 -0.91,2.73 -2.75,6.57 -0.09,8.99 5,-0.33 1.65,-5.17 3.02,-7.5 -0.02,-1.89 2.7,-2.63 2.22,-1.81 -0.32,1.22 1.23,-2.01 1.37,-1.35 0.55,-2.08 6.21,0.31 4.66,3 -1.91,0.84 -2,3.23 0.26,1.15 2.45,-1.08 2.74,4.46 0.99,4.09 -1.5,1.24 -2.29,2.82 0.36,2.78 -1.13,0.65 4.19,-0.03 5.39,-1.1 2.29,-0.52 5.75,-1.94 5.26,-3.92 2.22,-0.25 8.31,0.81 7.45,-2.17 0.96,-2.37 6.22,-3.12 9.36,-2.62 3.05,0.22 5.28,-0.17 7.03,-2.44 0.47,-2 2.25,-5.46 4.42,-3.82 3.84,-1.28 1.87,4.47 3.91,5.86 2.36,0.89 -1,1.61 -1.92,2.03 -1.09,-0.48 -1.77,0.7 -2.37,-0.16 -1.62,2.63 -2.84,0.88 -2.92,1.85 -0.57,-0.52 -2.76,2.76 -2.77,3.72 -0.5,1.65 3.15,2.57 2.26,1.27 1.44,2.18 -2.44,0.77 -2.26,1.3 -1.36,-0.01 -0.81,-0.79 -2.35,0.69 -1.85,-0.55 -5.89,1.74 -5.94,0.93 0.08,-1.86 -0.43,1.42 -0.64,1.54 1.07,0.31 0.31,2.04 -0.01,1.54 -0.9,2.28 -2.37,3.6 -3.89,0.92 2.26,-2.12 -1.68,0.74 0.9,2.02 1.32,1.16 -1.51,4.32 -2.25,4.43 1.62,-2.59 -0.02,-2.12 -0.66,-3.71 1.06,0.17 -0.7,-0.53 0.22,-0.82 -1.12,-0.18 2.34,-2.82 0.01,-1.31 -1.19,0.08 -0.93,0.86 -0.68,2.93 -1.98,-2.35 1.7,2.29 -1.13,-0.09 -2.04,0.95 0.34,-3.01 -1.33,-0.26 1.01,0.12 4.53,3.02 1.45,1.58 -0.68,-0.81 3.05,3 0.07,1.22 2.49,1.99 0.13,0.79 -1.37,0.35 2.31,1.44 3.97,0.8 4.64,4.16 -1.47,-3.21 -0.73,-1.15 -1.05,-1.15 -0.61,0.62 -1.78,0.54 -2.16,0.44 0.48,0.52 2.54,0.87 2.11,0.85 2.07,-0.15 -1.61,1.89 
-1.63,1.17 -2.95,-1.31 2.28,1.44 -1.22,0.87 1.2,0.56 2.95,0.18 0.85,1.03 -3.03,-0.38 -2.37,2.59 -5.29,2.47 -1.86,1.13 -1.92,2.25 -3.66,3.11 -2.2,1.15 -1.84,0.89 -2.24,1.35 -0.94,1.12 -1.34,1.62 -1.66,2.36 -0.67,0.46 -0.34,3.97 0.86,5.96 1.3,1.56 1.85,5.71 0.94,2.06 -0.69,0.74 2.79,4.86 1.56,7.37 0.64,2.75 -3.75,2.38 -2.46,1.67 -1.2,-0.96 -3.12,-3.38 -2.47,-3.59 -0.87,0.38 -0.31,-1.65 -1.14,-0.72 -1.58,-0.99 -0.03,-3.68 -0.91,-2.8 -1.33,1.64 1.35,-4.09 -1.62,-4.2 -1.3,-3.9 -4.43,-0.17 -5.84,-1.39 -1,-1.43 -0.58,-0.62 -3.17,-1.53 2.66,-0.46 -3.13,0.42 -1.44,-0.27 -0.33,-0.07 -1.32,0.56 -1.31,0.44 -1.8,1.72 -0.82,-2.43 -2.08,-0.11 -2.05,0.26 -4.43,0.11 -5.98,0.62 1.83,0.12 1.44,0.49 2.23,0.54 1.48,0.19 -1.78,1.62 1.17,1.98 0.42,2.21 -1.79,-1.1 -2.75,-0.55 0.67,1.59 -3.11,0.78 -3.19,-0.15 -1.68,-1.66 -4.03,1.09 -5.44,-1.14 0.53,-0.41 -2.14,1.69 -1.44,-0.17 0.3,1.13 -4.08,2.03 -1.94,1.22 -0.53,-0.07 -2.13,-1.1 -1,0.52 -2.02,1.83 -3.54,2.46 -3.8,2.22 -1.35,-0.93 -0.48,0.94 -1.72,0.68 -1.48,0.86 -0.03,0.86 -1.96,1.41 2.07,1.13 -2.37,1.66 0.19,1.61 -1.17,1.85 2.48,5.04 -1.08,3.42 -4.63,0.08 -4.65,-5.18 -7.45,-7.69 -1.23,-3.51 -6.12,-3.84 -7.55,-0.56 -3.97,-0.74 -4.02,-5.1 -7.47,-6.6 -2.22,-2.17 -5.82,-1.46 -7.53,0.12 -3.92,0.02 -7.79,0.07 -11.46,-1.56 -4.73,-1.54 -6.98,-3.2 -12.12,-1.91 -1.1,-3.53 -5.01,-4.7 -8.26,-5.41 -1.69,-0.62 -1.71,-3.37 -3.9,-4.43 -1.73,-0.97 0.03,-2.81 -2.33,-3.35 -1.29,-2.34 0.66,-0.68 0.52,-1.17 0.25,-2.59 -2,0.3 -2.1,-1.48 0.73,0.67 -0.18,-0.45 -0.31,-0.41 z M 104.4,80.81 c 0.33,1.25 2.04,0.65 1.2,-0.13 -0.11,-0.61 -0.94,-0.94 -1.29,-1.14 -0.46,-0.02 2.06,0.19 -0.19,-1.1 -0.89,-0.84 -3.75,-1.73 -2.38,-0.68 -2.16,0.77 1.6,-0.15 0.42,1.52 1.05,-0.32 1.26,0.87 0.18,0.86 0.69,0.15 1.7,0.05 2.06,0.68 z m 0.1,-3.67 c -2.34,0.7 2.58,1.37 -0.2,0.35 l 0.21,-0.12 z m -3.29,-0.28 c 2.41,-0.24 -0.19,-1.07 1.79,-0.84 -2.17,-1.81 -3.48,-1.06 -1.79,0.84 z m -7.89,-4.1 c 1.12,1.18 2.64,1.85 1.61,0.35 1.83,1.35 4.07,0.41 1.03,-0.14 -1.4,-0.69 2.07,0.54 
1.38,-0.34 -1.51,-0.94 -1.71,0.28 -2.26,-0.83 -2,-0.35 -1.08,1.03 -1.29,0.8 -0.66,-0.5 -1.34,-0.24 -0.47,0.15 z m 6.37,5.31 c -0.01,-0.77 1.79,-2.12 0.45,-1.96 -1.55,-1.36 -0.65,0.17 -0.73,1.05 0.26,-0.11 -0.42,0.78 0.28,0.91 z m -2.69,-1.66 c 0.4,-1.03 1.59,2.83 1.3,-0.2 -0.84,-2.96 -3.62,-2.69 -1.9,-0.7 -0.6,0.36 1.04,0.37 0.6,0.89 z M 0.07,57.92 c -0.4,-1.9 5.96,0.26 2.01,-0.03 -1.59,0.66 -0.57,0.63 -2.01,0.03 z M 20.67,80.91 c -1.34,-0.5 -1.14,0.58 -1.77,0.11 1.22,-0.83 4.87,-3.75 6.46,-2.6 0.35,0.4 2.71,0.41 1.19,-0.48 2.49,-2.14 5.1,-1.84 7.04,-4.16 2.94,1.25 -1.15,-2.12 2.2,-1.81 -3.58,-0.13 3.7,-4.04 -0.4,-1.86 -1.86,1.57 -4.72,-0.66 -2.12,-0.48 -2.6,-1.73 -1.27,3.51 -3.8,0.64 -1.82,-0.72 -4.13,-0.54 -6.71,0.48 -0.61,-0.02 1.89,-1.72 -0.33,-1.64 1.49,-1.38 -2.1,-3.72 -0.13,-4.18 -1.65,0.26 -0.43,2.97 -3.37,2.31 -2.26,0.51 -4.6,-1.57 -5.32,-2.26 0.81,-1.41 2.91,0.03 2.49,0.1 0.64,-0.75 3.62,0.47 1.28,-0.66 1.91,-0.57 -5.18,0.24 -3.21,-0.77 -1.12,-1.18 -0.77,0.65 -2.64,-0.74 1.02,-0.52 -2.33,-0.86 0.32,-0.9 -0.94,-0.56 2.18,-3.04 3.26,-2.16 -1.33,-0.27 1.28,-1.29 -0.53,-0.44 -0.88,-1.58 1.84,-0.74 0.88,-1.63 2.88,-0.58 1.38,1.17 3.97,0.24 1.37,-1.94 7.74,0.22 5.33,-3.41 -2.71,-0.4 2.89,-0.61 0.23,-1.59 -0.7,0.36 -2.98,0.31 -4.67,1.54 -0.62,-0.83 -2.2,-1.05 -0.89,-0.26 -2.9,-1.25 -7.61,1.13 -9.42,-1.3 -3.9,-1.57 3.76,-0.3 -0.4,-1.33 -2.08,0.34 -5.88,-1.45 -2.97,-0.96 1.88,-0.94 3.67,-1.4 5.92,-1.25 -2.66,-0.56 6.5,-2.5 4.29,-0.4 0.32,0.68 5.32,1.26 6.89,0.05 3.27,0.23 -1.44,-0.02 -1.57,-0.95 -3.08,-1.31 0.04,-0.48 1.12,0.3 1.36,0.43 6.18,0.27 2.37,-0.36 -0.73,0.47 -4.09,0.17 -2.23,-0.92 -2.45,-1.03 -4.35,0.94 -7.06,-1.7 -1.65,-1.27 -6.85,-1.73 -6.54,-2.33 2,-2.26 7.61,-1.14 9.13,-2.59 2.26,-3.26 5.43,-2.28 8.87,-3.57 -0.37,1.11 0.32,0.47 1.97,0.2 -3.35,0.42 -1.08,-1.3 -0.36,-0.75 4.21,1.35 7.14,-3.3 10.49,-0.78 -3.14,1.51 0.55,-0.02 1.71,-0.13 1.84,0.3 0.02,1.06 2.4,0.87 2.04,-1.23 7.42,0.57 4.26,0.68 2.93,0.38 5.81,0.31 9.47,0.19 3.54,0.82 7.19,0.78 
10.75,1.26 3.13,0.64 5.69,-1.02 8.99,0.57 5.49,-0.45 2.11,5.94 3.07,9.47 0.65,5.66 -0.39,11.42 0.14,17.05 1.93,1.51 5.09,-0.95 5.87,1.33 2.45,1.47 5.47,4.28 7.14,0.99 3.25,-1.67 4.58,2.07 7.43,2.95 2.73,2.04 3.75,5.53 7.2,5.96 2.91,-0.05 4.17,3.88 1.06,4.73 -0.43,-0.14 -1.2,-0.77 0.58,-1.57 -1,1.16 -2.73,-0.19 -0.38,0.05 -1.54,-0.37 -0.74,-3.32 -2.81,-1.78 0.23,-0.76 -2.33,2.61 -1.14,-0.16 0.25,-1.29 2,-0.1 -0.16,-1 0.54,1.59 -2.17,-1.07 -0.19,-0.1 -1.39,-1.21 -1.71,-1.91 -3.82,-2.35 1.92,-0.47 -0.2,-0.37 -0.38,-1.4 2.86,1.19 -1.63,-1.11 1.31,-0.45 -2.03,-0.04 -1.48,-0.27 -2.63,-1.03 2.12,-1.23 -2.64,0.72 -2.56,-1.52 -1.39,-3.25 -1.37,-0.67 -0.21,1.37 -0.78,-0.06 -3.5,-0.43 -1.91,-1.68 -1.16,0.41 -3.82,-0.64 -3.15,0.07 1.24,0.19 3.5,1.5 1.06,1.36 -0.35,0.97 -4.8,-1.66 -5.23,-2.26 -1.97,0.64 -4.62,-2.08 -2.4,-1.81 -0.08,0.76 2.12,-0.31 -0.26,-0.45 -2.5,1.73 -4.67,-0.19 -6.26,0.02 -2.92,-0.98 -7.63,1.05 -8.66,-1.54 1.63,-1.19 -3.96,1.85 -2.33,-0.39 -1.99,0.22 -0.65,-0.38 -2.67,-0.13 3.29,-0.61 -2.25,-0.23 0.86,-1.07 -1.41,0.17 -3.88,0.35 -3.56,0.28 -0.9,1.2 -1.15,-0.87 -0.97,-0.57 -0.84,0.92 -2.12,-0.02 -0.89,0.61 -3.1,0.9 1.81,0.48 -1.22,1.4 3.19,-1.11 0.67,0.89 1.28,0.62 -0.36,1.79 -3.91,0.06 -3.91,1.21 -0.34,-0.05 -2.06,1.11 -2.04,0.69 -1.52,0.4 -2.66,1.72 -4.47,0.64 1.73,-0.82 3.58,-1.19 0.78,-0.91 -1.15,-0.94 2.13,-1.74 0.78,-2.95 1.59,-1.26 6.46,-0.37 6.08,-0.55 -2.11,0 -3.05,-1.57 -0.34,-1.59 -2.86,0.34 -5.31,0.77 -7.89,2.48 -0.42,2.05 -3.85,-0.08 -1.12,1.68 -1.77,0.87 -1.55,0.52 -3.27,1.61 -2.15,0.5 -0.73,0.95 1.12,1.69 -3.04,0.92 -1.89,2.07 -4.37,2.23 -2.09,1.25 -4.42,1.37 -4.9,2.88 -2.63,0.77 -2.93,1.18 -5.09,1.68 -0.74,0.59 1.6,0.19 0.03,1.2 0.26,-0.8 -2.29,0.3 -2.85,0.95 -0.77,-0.7 -2.72,0.37 -4.25,0.69 -2.45,0.36 1.11,-0.77 -1.44,-0.56 -0.68,2 -2.34,1.34 -2.73,1.35 0.35,0.11 0.1,0.54 -0.14,0.36 z m 26.99,-8.45 c 1.93,-0.85 3.42,-0.67 0.41,-1.24 -0.19,0.63 -2.14,0.53 -0.41,1.24 z m -2.98,3.04 c -0.67,1.22 1.57,-0.57 1.01,-0.48 1.9,-0.39 0.78,-0.73 
2.64,-0.74 -2.75,-0.74 2.92,0.27 0.56,-1.22 -0.86,-0.19 -2.3,-0.51 -2.04,-0.25 -1.04,0.87 -2.82,-0.49 -1.47,0.92 -0.85,0.3 0.26,0.88 -1.18,-0.24 -3.45,-0.69 0.16,2.92 -0.51,1.47 -1.1,0.14 2.13,-0.32 0.99,0.54 z M 8.31,66.85 c 1.35,0.96 6.08,0.52 3.05,-1.04 -0.44,0.04 -5.38,0.22 -3.05,1.04 z m 17.8,12.93 c -1.12,1.49 1.56,0.1 0,0 z m 36.12,-12.93 c -0.52,1.89 3.71,-2.69 0.58,-0.16 z M 14.45,82.02 c 0.46,1.06 5.29,-0.53 4.55,-0.44 -0.89,-1.32 -3.62,-0.52 -4.55,0.44 z m -7.91,3.79 c 2.65,-0.21 5.15,-1.86 2.83,-2.13 -2.47,0.5 1.02,0.87 -1.38,1.1 1.05,0.67 -3.21,0.78 -1.44,1.03 z M 106.8,80.01 c 0.83,-0.58 0.7,-0.69 0.73,0.13 0.87,0.7 2,-1.92 -0.5,-1.82 0.13,0.39 -0.84,0.05 -0.23,1.69 z m -8.44,-6.19 c 1.08,-0.61 -0.74,2.49 1.17,0.71 1.96,0.03 -1.92,-3.63 0.88,-0.96 -0.99,-2.16 -4.33,-3.14 -2.05,0.25 z m 881.51,16.69 c -0.15,-1.56 3.37,-0.74 0,0 z m 16.68,-33.32 c -2,1.09 -3.57,-1.54 -0.74,-0.51 1.3,-0.8 4.62,0.32 2.98,0.96 -0.08,-0.58 -3.3,-0.68 -2.25,-0.45 z m -10,31.78 c -3.35,0.61 -0.63,0.14 1.28,0.03 0.08,-1.38 -0.06,-0.71 -1.28,-0.03 z m -8.59,1.35 c 2.07,-0.17 2.04,-1.3 0,0 z m -0.77,0.17 c 2.08,-1.56 -1.65,-0.91 0,0 z m -25.36,-3.25 c 3.52,-0.32 -2.58,-1.27 -0.45,-0.18 l 0.19,0.1 z M 40.21,182.25 c 3.59,-0.27 1.71,-4.09 -0.35,-3.14 -0.51,0.81 -0.55,2.33 0.35,3.14 z m -1.97,-4.52 c 3.2,-0.59 -2.98,-2.16 0,0 z M 227.78,102.47 c 1.48,-2.16 -4.13,2.08 0,0 z m 40.46,18.76 c -2.1,0.97 -1.64,-1 1.1,-0.77 1.53,-0.1 2.19,-1.01 1.41,0.06 4.61,-1.37 -1.64,1.1 -2.51,0.71 z",KZ:"m 635.48,106.92 c 0.85,0.58 0.85,1.1 1.4,0.98 -0.06,1.45 -2.75,-0.73 -1.37,-0.98 m -34.27,-8.19 c 1.87,-1.3 -0.29,-2.91 1.88,-4.33 0.77,-2.08 4.91,3.38 3.99,-0.56 -0.22,-1.67 5.03,-2.02 5.66,-3.84 2.5,1.26 3.81,-0.22 6.2,0.77 2.81,-0.79 4.79,4.77 4.62,1.32 2.54,2.56 4.49,-0.52 7.02,-0.04 1.56,0.8 3.36,-1.01 4.84,0.91 2.25,1.26 2.97,-1.12 5.37,0.13 4.15,-1.28 0.43,-2.62 -1.75,-3.35 -2.3,-0.64 4.55,-0.85 1.32,-2.37 0.62,-1.47 6.13,-0.31 2.29,-1.49 -2.87,-0.18 1.71,-1.17 -1.18,-0.99 -1.25,-1.99 
3.47,-0.56 5.06,-1.45 2.11,-0.76 5.14,-0.05 6.85,-1.43 3.21,-0.32 6.7,-0.54 9,-1.89 1.36,-0.49 4.81,-0.11 6.37,0.69 0.83,1.68 -0.1,3.23 2.83,2.23 0.89,-0.92 1.32,1.48 1.88,0.55 -0.43,-0.19 5.25,-0.11 1.72,1.04 2.26,1.53 5.58,-1.12 8.38,-1.86 2.56,-0.65 0.87,0.29 0.7,0.89 3.19,1.36 5.73,3.73 7.7,6.55 0.84,2.51 2.64,3.17 3.86,1.09 1.75,1.5 4.49,2.17 7.47,0.82 2.83,1.04 4.28,4.83 7.79,4.13 1.77,-1.65 1.23,0.86 3.1,1.28 -2.05,-0.27 -2.18,2.4 -4.57,2.28 0.3,1.93 -0.29,4.78 -2.86,3.6 -2.35,0.22 -5.16,-1.75 -5.54,1.95 -1.78,1.45 -0.09,2.31 -0.4,3.3 -2.16,-0.69 -5.75,-0.33 -7.13,0.85 2.3,-0.22 0.81,3.12 2.35,4.62 -1.09,0.68 -1.69,0.94 -1.55,2.8 -3.23,-2.28 -7.38,-2.07 -11.24,-2.07 -2.43,1.3 -7.39,-3.12 -7.37,1.39 -2.92,-1.13 -6.59,-1.72 -8.04,1.07 -1.93,0.67 -5.37,1.79 -5.58,4.09 -1.83,-0.48 -1.91,-2.53 -4.49,-1.42 -1.63,-0.37 -1,-2.74 -2.74,-3.09 1.03,-2.29 -1.64,-3.09 -3.39,-4.11 -2.22,0.8 -4.63,-0.29 -6.91,0.43 -2.26,-0.9 -5.8,-3.2 -5.06,-5.65 -2.74,-0.28 -1.35,-0.29 -0.6,-0.7 -0.37,-0.54 0.4,-0.87 -1.21,-0.94 -0.69,-2.11 -0.39,2.46 -1.53,0.68 -1.61,-0.35 -2.77,1.73 -5.65,1.57 -5.86,-0.35 -2.77,6.16 -3.42,9.74 -0.79,4.85 -4.39,-3.89 -7.64,-1 -1.94,1.39 -3.02,1.34 -1.61,-1.03 0.38,-0.93 -3.8,-0.84 -3.84,-2.55 -0.49,-1.71 -5.18,-3.45 -1.17,-3.16 2.17,0.77 1.7,0.06 0.4,-0.86 0.58,-2.69 6.52,-0.37 5.37,-1.44 -0.75,-0.8 1.98,-4.85 -1.49,-4.23 -2.7,-0.43 -5.14,-0.39 -7.64,1.08 -0.69,-0.24 -2.03,1.33 -3.14,-0.52 3.21,0.38 -1.77,-4.38 -3.24,-2.86 -0.39,0.14 -1.02,-0.86 -0.39,-0.93 -0.16,-0.54 -0.54,-0.79 -1.12,-0.79 -0.49,-0.05 -0.8,-0.43 -0.51,-0.89 z",SZ:"m 557.32,310.75 c 1.26,2.66 4.82,1.23 3.68,-1.1 -1.1,-2.6 -3.34,-0.91 -3.68,1.1 z",UZ:"m 634.94,107.63 c 2.49,0.38 1.12,2.32 0.89,2.72 -1.58,0.97 -0.69,-0.89 -0.98,-0.64 1.01,-1.7 -0.67,0.41 0.09,-2.08 z m -7.78,1.44 c 1.67,-0.03 7.15,-2.58 6.73,-0.72 -1.66,2.59 1.05,2.25 2.03,3.18 0.97,0.87 3.67,-0.43 4.43,-1.27 2.09,2.38 4.37,3.47 7.5,2.72 2.67,0.25 5.12,-0.95 6.82,1.84 1.29,-0.11 -0.59,3.17 1.78,2.97 -0.51,3.95 4.61,0.47 
4.78,3.45 1.45,-0.04 2.7,-3.27 5.3,-3.4 1.18,-1.19 4.61,-1.27 1.45,-0.08 -3.27,0.81 2.64,3.36 2.68,0.97 0.4,2 6,1.64 2.49,2.82 -1.22,0.94 -3.79,1.37 -6.13,0.45 2.54,-1.53 -0.46,-1.91 -1.88,-0.75 -1.91,-1.12 -0.27,2.46 -2.47,1.25 0.35,-0.03 0.17,2.48 -2.73,1.7 -3.17,1.35 2.25,1.41 0.93,3.28 1.64,1.79 -1.59,4.69 -3.64,3.06 -1.43,-0.27 0.25,-2.77 -2.4,-2.43 -3.38,-1.17 -7.08,-2.94 -9.81,-5.29 -0.68,-3.64 -3.37,-3.1 -6.13,-3.51 0.41,-2.53 -2.07,-2.53 -3.94,-3.75 -2.78,-0.67 0.42,1.77 -1.96,0.57 -0.85,1.13 -3.88,1.11 -2.72,3.36 -5.77,1.58 -1.93,-7.55 -3.09,-10.4 z",MN:"m 715.47,97.3 c 2.21,-1.36 4.58,-0.74 6.08,-2.4 2.56,-0.79 5.4,-2.44 7.78,-2.08 1.76,0.76 4.35,0.26 5.77,2.15 3.17,-0.53 7.33,2.18 9.44,-1.13 -2.94,-2.15 1.68,-5.81 3.83,-4.23 2.9,1.17 7.1,0.22 7.76,3.94 3.36,2.26 6.88,-0.66 10.43,0.34 3.49,0.12 5.14,3.12 8.55,3.04 3.6,0.7 7.68,-0.1 10.9,-1.49 3.04,-2.41 6.37,0.05 9.52,0 -1.05,1.66 -1.83,3.52 -3.06,5.03 1.09,1.42 4.03,0.25 5.64,0.57 2.34,-2.62 9.85,4.62 3.86,3.15 -3.45,-0.03 -6.71,0.89 -8.92,3.33 -3.61,-0.16 -6.31,3.89 -9.99,1.24 -2.64,0.18 -2.1,2.81 -0.75,3.9 -3.19,1.47 -5.91,4.43 -9.81,3.6 -3.87,-0.76 -7.6,2.86 -11.05,1.72 -3.6,0.51 -6.48,-2.04 -9.86,-2.18 -3.79,0.03 -7.61,-0.37 -11.42,-0.41 -2.84,-0.96 -3.18,-5.25 -6.83,-5.27 -2.75,-2.39 -8.44,0.46 -9.88,-3.04 2.35,-1.88 -0.41,-6.01 -2.61,-6.12 -1.91,-0.46 -5.47,-1.59 -5.22,-3.38 z",BT:"m 718.13,159.29 c 1.2,-1.82 3.41,-4.27 5.2,-2.5 2.47,-0.34 3.75,1.18 3.95,3.26 -3.04,-0.14 -6.66,1.35 -9.15,-0.76 z",NC:"m 927.62,293.83 c 1.71,1.96 4.72,3.95 6.28,4.45 3.03,0.03 -2.85,-2.47 -3.41,-3.67 -1.14,-0.87 -5.55,-4.18 -2.87,-0.78 z",FJ:"m 971.09,281.12 c -2.23,-0.19 -6.1,3.14 -2.02,1.59 1.31,0.61 2.75,-1.28 0.65,-0.15 0.53,-0.42 0.79,-1.08 1.36,-1.45 z m -5.31,8.4 c 2.08,-0.7 -0.46,-0.63 0,0 z m -1.54,-2.83 c 2.75,1.62 4.33,-1.95 1.39,-2.17 -1.03,-0.23 -3.15,1.74 -1.39,2.17 z",KW:"m 601,153.77 c 0.56,-1.79 3.68,-4.46 4.28,-1.29 -2.6,-0.55 3.37,3.63 -0.75,2.94 -1.1,-0.89 -1.42,-2.21 -3.53,-1.65 z",TL:"m 
815.54,262.09 c 2.29,-1.66 1.58,1.43 0.01,-0.01 m 3.21,-0.55 c 0.81,0.53 5.95,-1.08 5.79,-2.43 -1.86,0.72 -6.82,-0.29 -5.79,2.43 z",BS:"m 267.62,176.8 c 0.17,-1.42 3.32,-0.79 0.79,-0.07 z m -8.13,-12.64 c -0.72,-1.44 2.35,2.69 1.06,1.5 0.67,-0.84 -0.6,-1.2 -1.06,-1.5 z m -1.78,-1.31 c -0.33,-0.79 0.35,-3.18 -2.04,-2.86 1.85,-0.39 2.76,1.35 2.04,2.86 z m -1.2,6.06 c 0.21,0.35 -1.09,-1.92 -0.14,-1.27 -0.3,0.42 1.04,-0.31 0.43,1.01 z m -1.5,-4.16 c 3.19,1.57 -1.35,3.32 -0.11,1.1 0.29,-0.29 0.18,-0.76 0.11,-1.1 z",VU:"m 938.23,281.65 c 1.61,-0.58 -1.85,-0.74 0,0 z m -1.79,0.66 c 2.46,-0.54 -2.57,-3.16 -0.26,-0.67 l 0.07,0.35 z m -1.18,-4.14 c -0.37,-0.81 -1.02,0.61 -1.56,-1.17 -0.6,1.6 1.95,4.29 1.56,1.17 z",FK:"m 371.78,390.14 c 1.36,-0.97 -1.93,-2.38 -3.9,-2.41 -3.05,-0.14 0.58,0.28 0.74,0.68 1.16,0.24 2.29,0.94 3.16,1.73 z m -65.58,-7.9 c 1.78,1.15 1.13,-0.67 2.93,-0.17 -1.32,-0.96 3.77,-0.56 2.02,-1.48 0.92,-0.53 -1.79,-0.21 -0.97,0.15 -0.75,-1.74 -2.42,0.03 -2.42,0.37 -0.86,-0.02 -0.93,1.02 -1.56,1.13 z m -1.55,-2.08 c 0.91,0.01 0.35,-0.34 0,0 z m 0.78,0.07 c -1.32,0.17 -2.19,-0.12 -0.23,0.83 -3,-0.14 0.37,0.45 -0.87,0.2 -0.08,1.01 -2.44,0.7 -0.81,1.17 1.24,-0.02 6.78,-3.02 1.91,-2.2 z",GM:"m 425.26,198.03 c 0.71,0.78 4.95,-0.82 1.67,-0.13 -2.57,-0.56 2.45,-0.38 3.36,-1.13 0.97,0.82 5.3,1.12 1.67,1.51 -2.03,-2.19 -5.77,2.16 -6.69,-0.26 z",QA:"m 612.64,164.51 c -0.22,-1.8 2.78,-3.37 2.25,0.05 0.69,2.01 -2.87,3.02 -2.25,-0.05 z",JM:"m 254.5,184.26 c 1.77,-2.44 8.32,1.83 4.21,0.87 -1.02,1.21 -2.73,-0.09 -4.21,-0.87 z",CY:"m 561.46,137.2 c 1.04,-0.9 5.64,-1.47 5.97,-1.65 -1.79,1.36 -1.58,1.89 -3.94,2.84 -0.7,-0.33 -1.76,-0.28 -2.03,-1.19 z",PR:"m 288.97,185.11 c -1.94,1.26 -5.5,-1.35 -2.16,-1.59 0.95,0.19 4.71,0.05 2.16,1.59 z",PS:"m 568.89,145.08 c 2.09,-3.37 1.99,4.04 -0.09,1.92 1.69,-1.65 -0.49,0.09 0.09,-1.92 z",BN:"m 788.31,222.72 c 1.27,-0.22 3.44,-2.27 1.95,0.11 0.51,2.62 -1.33,0.75 -1.95,-0.11 z",TT:"m 300.22,207.39 c 2.05,-0.48 0.12,-3.15 2.58,-1.96 0.31,2.2 
-0.78,1.84 -2.58,1.96 z",PF:"m 57.69,285.59 c 1.46,0.8 -1.48,-1.8 -0.45,-0.06 z",WS:"m 991.64,273.63 c -1.72,1.32 3.38,0.32 0,0 z",LU:"m 487.81,95.39 c 0.65,-2.6 3.8,2.42 0.17,0.89 0.5,-0.17 -0.04,-0.74 -0.17,-0.89 z",KM:"m 592.41,269.2 c 0.31,-2.77 -1.68,-1.13 0,0 z",FO:"m 452.8,60.76 c -2.56,-1.32 1.36,1.68 0,0 0.91,0.61 0.44,-0.7 -0.34,-0.19 z",SS:"m 566.64,207.99 c -0.19,2.08 0.23,4.73 -2.7,4.14 -1.45,2.41 2.99,1.96 3.64,4.3 1.51,1.46 1.41,4.35 3.61,4.48 1.29,3.62 -5.24,-0.17 -6.05,3.95 -2.72,0.1 -4.39,0.87 -6.86,0.14 -1.77,2.62 -3.4,-4.33 -5.5,-1.69 -2.28,0.44 -5.15,-0.75 -5.45,-3.69 -2.8,-1.08 -2.37,-4.15 -5.49,-5.07 0.86,-2.33 -4.24,-1.45 -2.5,-3.64 1.26,-1.32 1.02,-3.42 2.14,-5.03 3.11,-1.81 2.65,4.17 5.98,2.03 1.8,0.07 3.66,2.2 4.99,-0.26 1.68,-0.25 2.29,-3.22 4.19,-0.41 2.39,0.9 4.63,-3.17 4.68,-5.07 -0.07,-1.62 -1.35,-1.85 0.96,-1.89 0.29,-1.86 2.27,-0.18 1.05,1.26 -0.23,2.66 2.99,4.66 2.5,6.51 l 0.51,0.04 z"}}}}),b});
PypiClean
/BitGlitter-2.0.0.tar.gz/BitGlitter-2.0.0/bitglitter/config/palettemodels.py
from sqlalchemy import Boolean, Column, Float, Integer, String, UniqueConstraint
import base64
import math
import time

from bitglitter.config.config import engine, SQLBaseClass
from bitglitter.utilities.palette import BitsToColor, ColorsToBits, convert_hex_to_rgb, get_color_distance, \
    get_palette_id_from_hash


class Palette(SQLBaseClass):
    """SQLAlchemy model describing a BitGlitter color palette.

    Stores the palette's identity, its color set (serialized as a single
    string for speed, see convert_colors_to_tuple()), and derived math
    fields (color distance, number of colors, bit length).
    """
    __tablename__ = 'palettes'
    __abstract__ = False
    is_24_bit = Column(Boolean, default=False)  # True for the full 24-bit RGB palette; no explicit color set is stored
    is_custom = Column(Boolean, default=True)
    is_included_with_repo = Column(Boolean, default=False)  # for differentiating other people's colors & our fancy ones
    palette_id = Column(String, unique=True, nullable=False)
    name = Column(String, unique=True, nullable=False)
    description = Column(String)
    nickname = Column(String, nullable=True, unique=True)
    color_set = Column(String)  # serialized as "r,g,b|r,g,b|..." (see _initialize_colors)
    color_distance = Column(Float, default=0, nullable=False)
    number_of_colors = Column(Integer, default=0, nullable=False)
    bit_length = Column(Integer, default=0, nullable=False)
    time_created = Column(Integer, default=time.time)
    base64_string = Column(String)  # shareable encoding of a custom palette, built in create()
    is_valid = Column(Boolean)  # True when number_of_colors is a power of two

    @classmethod
    def create(cls, color_set, **kwargs):
        """Create a palette row, derive its color math fields, and -- for
        custom palettes -- build the base64 share string.

        :param color_set: iterable of colors (hex strings or RGB tuples),
            cleaned by _initialize_colors(); may be falsy for 24-bit palettes.
        :return: the persisted Palette instance.
        """
        object_ = super().create(**kwargs)
        object_._initialize_colors(color_set)
        if object_.is_custom:
            # Fields are joined with a literal '\\' separator, then base64
            # encoded so the palette can be shared as a single string.
            assembled_string = '\\\\'.join([object_.palette_id, object_.name, object_.description,
                                            str(object_.time_created), str(object_.convert_colors_to_tuple())]) \
                               + '\\\\'
            object_.base64_string = base64.b64encode(assembled_string.encode()).decode()
        # Persist the fields set by _initialize_colors() (which defers saving).
        object_.save()
        return object_

    __table_args__ = (
        UniqueConstraint('palette_id'),
    )

    def __str__(self):
        """Human-readable summary, e.g. "Custom Palette - Foo - 4 Colors"."""
        palette_type = 'Custom' if self.is_custom else 'Default'
        return f'{palette_type} Palette - {self.name} - {self.number_of_colors} Colors'

    def _calculate_palette_math(self, color_set, save=True):
        """Runs during model creation and when color set is updated.

        Derives color_distance, number_of_colors, bit_length, and is_valid
        from the given color set.  A palette is valid only when its color
        count is a power of two (so each color maps to a whole bit pattern).
        """
        self.color_distance = get_color_distance(color_set)
        self.number_of_colors = len(color_set)
        is_valid = math.log2(self.number_of_colors).is_integer()
        if is_valid:
            self.bit_length = int(math.log(self.number_of_colors, 2))
        else:
            self.bit_length = 0
        self.is_valid = is_valid
        if save:  # Added to prevent repetitive saves if used in other methods
            self.save()

    def convert_colors_to_tuple(self):
        """Since all of their colors are stored as a single string for speed, this function retrieves it and returns
        them in a more usable list format.
        """
        if not self.is_24_bit:
            string_split = self.color_set.split('|')
            returned_list = []
            for piece in string_split:
                channels = piece.split(',')
                channels = [int(channel) for channel in channels]
                returned_list.append((channels[0], channels[1], channels[2]))
            return returned_list
        else:
            # 24-bit palettes have no stored color set.
            return None

    def _initialize_colors(self, color_set):
        """An internal method that blindly accepts tuples.  Use palettefunctions functions for prior necessary
        validation of values.
        """
        color_set_cleaned = convert_hex_to_rgb(color_set) if color_set else None
        if not self.is_24_bit:
            # save=False: the caller (create) performs the single save.
            self._calculate_palette_math(color_set_cleaned, save=False)
            string_list = []
            for color in color_set_cleaned:
                to_string = [str(channel) for channel in color]
                string_list.append(','.join(to_string))
            self.color_set = '|'.join(string_list)
        else:
            # Fixed constants for the full RGB space: 2**24 colors.
            self.bit_length = 24
            self.color_distance = 0
            self.number_of_colors = 16777216
        if self.is_custom:
            self.palette_id = get_palette_id_from_hash(self.name, self.description, self.time_created,
                                                       color_set_cleaned)

    def return_encoder(self):
        """Return a BitsToColor encoder configured for this palette."""
        color_set_tupled = self.convert_colors_to_tuple()
        return BitsToColor(color_set_tupled, self.bit_length, self.name)

    def return_decoder(self):
        """Return a ColorsToBits decoder configured for this palette."""
        color_set_tupled = self.convert_colors_to_tuple()
        return ColorsToBits(color_set_tupled, self.bit_length, self.name)


SQLBaseClass.metadata.create_all(engine)
PypiClean
/FamcyDev-0.3.71-py3-none-any.whl/Famcy/_util_/_fthread.py
import threading
import enum
from flask import session, g, request


class FPriority(enum.IntEnum):
    """
    This is the enum for defining Famcy module priorities.

    Higher numeric values outrank lower ones in FamcyPriorityQueue.
    """
    Standard = 1
    Error = 2
    Critical = 3


class FamcyPageQueue:
    """Routes queued work items to the per-page priority queue kept in the
    Flask session.
    """

    def __init__(self):
        super(FamcyPageQueue, self).__init__()
        # NOTE(review): this dict is never read by the visible code; the
        # actual queues appear to live in the Flask session (see add()).
        self.BackgroundQueueDict = {}

    # def init_queue(self, _id):
    # 	session["BackgroundQueueDict"] = FamcyPriorityQueue()

    def add(self, value, priority):
        """Push `value` with `priority` onto the queue of the page owning
        `value.target`.

        `value.target` may be a single widget or a list of widgets; the
        owning page is resolved through find_page_parent().  Assumes a
        FamcyPriorityQueue was already stored in the session under
        "<route>BackgroundQueueDict" -- TODO confirm where that happens.
        """
        if isinstance(value.target, list):
            _page = value.target[0].find_page_parent(value.target[0])
        else:
            _page = value.target.find_page_parent(value.target)
        # "/some/route" -> "some_route" (slashes to underscores, drop the first)
        route_name = _page.route.replace("/", "_")[1:]
        # print('route_name: ', route_name)
        session[route_name + "BackgroundQueueDict"].add(value, priority)
        # print("add", session[route_name+"BackgroundQueueDict"])


class FamcyPriorityQueue:
    """
    Max-priority queue backed by a flattened binary heap.

    This PQ is adopted from: https://github.com/fafl/priority-queue.git
    """

    def __init__(self):
        # List of items, flattened binary heap. The first element is not used.
        # Each node is a tuple of (value, priority, insert_counter)
        self.nodes = [None]  # first element is not used

        # Current state of the insert counter
        self.insert_counter = 0  # tie breaker, keeps the insertion order

    def _is_higher_than(self, a, b):
        """Comparison between two nodes: higher priority wins; on equal
        priority the lower insert counter (earlier insertion) wins."""
        return b[1] < a[1] or (a[1] == b[1] and a[2] < b[2])

    def _heapify(self, new_node_index):
        """Sift the node at `new_node_index` up until its parent outranks it."""
        while 1 < new_node_index:
            new_node = self.nodes[new_node_index]
            parent_index = new_node_index // 2
            parent_node = self.nodes[parent_index]

            # Parent already outranks the node -- heap property restored.
            if self._is_higher_than(parent_node, new_node):
                break

            # Swap with parent
            self.nodes[parent_index] = new_node
            self.nodes[new_node_index] = parent_node

            # Continue further up
            new_node_index = parent_index

    def add(self, value, priority):
        """Add a new node with a given priority."""
        new_node_index = len(self.nodes)
        self.insert_counter += 1
        self.nodes.append((value, priority, self.insert_counter))

        # Move the new node up in the hierarchy
        self._heapify(new_node_index)

    def peek(self):
        """Return the top (highest-priority) value without removing it, or
        None when the queue is empty."""
        if len(self.nodes) == 1:
            return None
        else:
            return self.nodes[1][0]

    def bottom(self):
        """Return the lowest-priority value (the one pop() would yield last),
        or None when the queue is empty.

        BUGFIX: the previous implementation returned self.nodes[-1][0], but a
        binary heap only orders parents above children -- the last array slot
        is merely one arbitrary leaf, not necessarily the overall lowest node
        (e.g. insert priorities 5, 1, 2: nodes[-1] holds 2, true bottom is 1).
        Scan for the node that every other node outranks instead.
        """
        if len(self.nodes) == 1:
            return None
        lowest = self.nodes[1]
        for node in self.nodes[2:]:
            # `node` becomes the new bottom when the current candidate
            # outranks it (same tie-break rules as the rest of the heap).
            if self._is_higher_than(lowest, node):
                lowest = node
        return lowest[0]

    def pop(self):
        """Remove and return the top value; raises LookupError when empty."""
        if len(self.nodes) == 1:
            raise LookupError("Heap is empty")

        result = self.nodes[1][0]

        # Move the empty slot down, always promoting the winning child.
        empty_space_index = 1
        while empty_space_index * 2 < len(self.nodes):
            left_child_index = empty_space_index * 2
            right_child_index = empty_space_index * 2 + 1

            # Left child wins (or there is no right child)
            if (
                len(self.nodes) <= right_child_index
                or self._is_higher_than(self.nodes[left_child_index],
                                        self.nodes[right_child_index])
            ):
                self.nodes[empty_space_index] = self.nodes[left_child_index]
                empty_space_index = left_child_index
            # Right child wins
            else:
                self.nodes[empty_space_index] = self.nodes[right_child_index]
                empty_space_index = right_child_index

        # Swap the empty slot with the last element and sift it back up.
        last_node_index = len(self.nodes) - 1
        self.nodes[empty_space_index] = self.nodes[last_node_index]
        self._heapify(empty_space_index)

        # Throw out the last element
        self.nodes.pop()

        return result


class FamcyThread(threading.Thread):
    """
    Represent the famcy thread implementation. Currently it just inherits
    from the basic threading Thread class.
    """

    def __init__(self, *args, **kwargs):
        super(FamcyThread, self).__init__(*args, **kwargs)
PypiClean
/Cibyl-1.0.0.0rc1.tar.gz/Cibyl-1.0.0.0rc1/cibyl/models/ci/base/build.py
from typing import Dict, List

from cibyl.cli.argument import Argument
from cibyl.models.attribute import AttributeDictValue, AttributeListValue
from cibyl.models.ci.base.stage import Stage
from cibyl.models.ci.base.test import Test
from cibyl.models.model import Model


class Build(Model):
    """General model for a job build

    @DynamicAttrs: Contains attributes added on runtime.
    """

    # Declarative description of the model's attributes and the CLI
    # arguments that populate them.
    API = {
        'build_id': {
            'attr_type': str,
            'arguments': []
        },
        'status': {
            'attr_type': str,
            'arguments': [Argument(name='--build-status', arg_type=str,
                                   func='get_builds', nargs='*',
                                   description="Build status")]
        },
        'duration': {
            'attr_type': int,
            'arguments': [],
        },
        'tests': {
            'attr_type': Test,
            'attribute_value_class': AttributeDictValue,
            'arguments': [Argument(name='--tests', arg_type=str,
                                   nargs='*', func='get_tests',
                                   description="Job test")]
        },
        'stages': {
            'attr_type': Stage,
            'attribute_value_class': AttributeListValue,
            'arguments': [Argument(name='--stages', arg_type=str,
                                   nargs=0,
                                   description="Build stages run")]
        }
    }

    def __init__(self, build_id: str, status: str = None,
                 duration: int = None, tests: Dict[str, Test] = None,
                 stages: List[Stage] = None, **kwargs):
        # Status values are normalized to upper case so comparisons and
        # merges are case-insensitive.
        if status is not None:
            status = status.upper()
        super().__init__({'build_id': build_id, 'status': status,
                          'duration': duration, 'tests': tests,
                          'stages': stages, **kwargs})

    def __eq__(self, other):
        """Two builds are equal when they share the same build_id."""
        if not isinstance(other, self.__class__):
            return False
        return self.build_id.value == other.build_id.value

    def add_test(self, test: Test):
        """Add a test to the build.

        If a test with the same name already exists, the two are merged
        instead of overwriting.

        :param test: Test to add to the build
        :type test: Test
        """
        test_name = test.name.value
        if test_name in self.tests:
            self.tests[test_name].merge(test)
        else:
            self.tests[test_name] = test

    def add_stage(self, stage: Stage):
        """Add a stage to the build.

        :param stage: Stage to add to the build
        :type stage: Stage
        """
        self.stages.append(stage)

    def merge(self, other):
        """Merge the information of two build objects representing the same
        build.

        Missing scalar fields (status, duration) are back-filled from
        `other`; tests are merged by name; stages are adopted wholesale when
        this build has none.

        :param other: The Build object to merge
        :type other: :class:`.Build`
        """
        if not self.status.value:
            self.status.value = other.status.value
        # BUGFIX: duration was never merged, leaving it empty even when the
        # other representation of the same build carried it -- back-fill it
        # the same way status is.
        if not self.duration.value:
            self.duration.value = other.duration.value
        for test in other.tests.values():
            self.add_test(test)
        if not self.stages.value and other.stages.value:
            self.stages = other.stages
PypiClean
/GailBot_Testing_Suite-0.1a8-py3-none-any.whl/gailbot/core/engines/whisperEngine/whisperTimestamped/transcribe_efficient.py
import sys import os import whisper import torch import torch.nn.functional as F from .alignment import perform_word_alignment from .utils import ( get_logit_filters, should_use_space, print_timestamped, round_confidence ) from .vars import ( HOP_LENGTH, SAMPLE_RATE ) import string import logging logger = logging.getLogger() _punctuation = "".join(c for c in string.punctuation if c not in ["-", "'"]) def _transcribe_timestamped_efficient( model, audio, remove_punctuation_from_words, compute_word_confidence, include_punctuation_in_confidence, refine_whisper_precision_nframes, plot_word_alignment, # Whisper specific options **whisper_options, ): """ Timestamps a transcription created by the whisper engine. Args: model: audio: remove_punctuation_from_words: compute_word_confidence: include_punctuation_in_confidence: refine_whisper_precision_nframes: plot_word_alignment: Returns: """ # Get options sample_len = whisper_options["sample_len"] temperature = whisper_options["temperature"] no_speech_threshold = whisper_options["no_speech_threshold"] logprob_threshold = whisper_options["logprob_threshold"] verbose = whisper_options["verbose"] # Note: "on-the-fly" verbose is not implementable in the current state (we don't know the absolute position of the current chunk). 
See issue #18 verbose_bugged = False whisper_options["verbose"] = None if whisper_options["verbose"] is True else whisper_options["verbose"] # We will print intermediate results ourselves logit_filters = get_logit_filters(model, whisper_options) language = whisper_options["language"] tokenizer = whisper.tokenizer.get_tokenizer(model.is_multilingual, task=whisper_options["task"], language=language) max_sample_len = sample_len or model.dims.n_text_ctx // 2 # Note: we cannot trust the token in the middle of tokenizer.sot_sequence which refers to the language # (arbitrarily set to <|en|> if it's actually None/unknown) token_sot = tokenizer.sot token_eot = tokenizer.eot debug = logger.getEffectiveLevel() >= logging.DEBUG # The main outcome timestamped_word_segments = [] # list of timestamped word segments that have been collected so far # Main variables to be accumulated segment_tokens = [[]] # list of lists of token indices that have been collected so far (one list per segment) segment_attweights = [[] for _ in range(len(model.decoder.blocks))] # attention weights on the last segments segment_avglogprobs = [] # average log probability for each segment (actually of the corresponding chunk, as computed by whisper) segment_logprobs = [] # token log probabilities for each segment # Variables related to options that can skip some segments sot_index = None # index of the SOT token in the current set of processed tokens no_speech_prob = None # no speech probability for the current 30 sec chunk chunk_logprobs = [] # log probabilities for the current 30 sec chunk chunk_tokens = [] # tokens for the current 30 sec chunk (list of Torch tensors) chunk_tokens_nosot = [] # tokens for the current 30 sec chunk, without the SOT tokens (list of indices) last_token_fallback = None # last token to use as a fallback if the model gets stuck has_started = False # whether we have started decoding mfcc = None # MFCC features for the current 30 sec chunk new_mfcc = None # num_inference_steps = 0 
# number of inference steps performed so far (for debugging only) def reset(add_segment, keep_last_token): """ Reset the list of tokens for the current speech segment, and corresponding cross-attention weights """ nonlocal segment_tokens, segment_attweights if add_segment: if keep_last_token: segment_tokens.append([segment_tokens[-1][-1]]) segment_attweights = [w[-1:] for w in segment_attweights] else: segment_tokens.append([]) segment_attweights = [[] for w in segment_attweights] segment_tokens[-2].pop(0) if debug: logger.debug(f"Added new segment: {tokenizer.decode_with_timestamps(segment_tokens[-2])}") elif len(segment_tokens[-1]) > 0: segment_tokens[-1] = [] segment_attweights = [[] for w in segment_attweights] if debug: logger.debug(f"Reset last segment to: {tokenizer.decode_with_timestamps(segment_tokens[-1])}") saw_consecutive_timestamps = False def must_flush_segment(curr_tokens): """ Return whether or not the previously collected tokens must be used to add a new speech segment """ nonlocal segment_tokens, saw_consecutive_timestamps, chunk_tokens_nosot if curr_tokens is not None and len(curr_tokens) == 1: is_timestamp = curr_tokens[0] >= tokenizer.timestamp_begin is_previous_timestamp = segment_tokens[-1][-1] >= tokenizer.timestamp_begin if len(segment_tokens[-1]) > 0 else False consecutive_timestamps = is_timestamp and is_previous_timestamp if consecutive_timestamps: saw_consecutive_timestamps = True if len(chunk_tokens_nosot) == max_sample_len - 2 and is_timestamp: consecutive_timestamps = True return consecutive_timestamps else: # Several tokens as a prompt or must flush last segments must_flush = not saw_consecutive_timestamps and len(segment_tokens[-1]) > 1 logger.debug(f"New prompt: flushing = {must_flush}") if not must_flush: # Discard the end of the last transcription reset(False, True) saw_consecutive_timestamps = False return must_flush index_begin_30sec_chunck = 0 def get_index_begin_30sec_chunck(curr_tokens): nonlocal index_begin_30sec_chunck if 
curr_tokens is None or len(curr_tokens) > 1: res = index_begin_30sec_chunck index_begin_30sec_chunck = len(segment_tokens)-1 return res def may_flush_segment(curr_tokens = None): """ Add a speech segment with the new tokens if necessary. May also remove the last collected segments if filtered out by Whisper (no_speech_prob <= no_speech_threshold) """ nonlocal segment_tokens, segment_attweights, timestamped_word_segments, has_started, no_speech_prob, chunk_tokens, chunk_tokens_nosot, chunk_logprobs, mfcc, new_mfcc, logit_filters, index_begin_30sec_chunck, last_token_fallback, num_inference_steps # Check if a new segment should be added unfinished_decoding = False if must_flush_segment(curr_tokens): if mfcc is None: mfcc = new_mfcc if debug: logger.debug(f"Adding segment {len(timestamped_word_segments)+1} at step {num_inference_steps}:\n\t{tokenizer.decode_with_timestamps(segment_tokens[-1])}") tokens = segment_tokens[-1][1:] # When the decoding hit the max limit (number of tokens) -- usually when the language model gets stuck -- # then we have to recover the last token from what is send to the decoder unfinished_decoding = len(tokens) and tokens[-1] < tokenizer.timestamp_begin last_token_reliable = True if unfinished_decoding: logger.debug(f"WARNING: decoding hit the max limit for segment {segment_tokens} (It usually happens when the language model gets stuck)") # The last token chosen is in the prompt for the new chunk if curr_tokens is not None and curr_tokens[0] == tokenizer.sot_prev: logger.debug(" Guess last token from the prompt for the new chunk") last_token_fallback = curr_tokens[-4].item() # Fallback for the last segment, or without prompt: Assume greedy decoding else: logger.debug(f" Guess last token using probas (assuming greedy decoding)") last_token_fallback = torch.argmax(chunk_logprobs[-1]).item() last_token_reliable = (temperature == 0) if debug: logger.debug(f"WARNING: also add last token: {tokenizer.decode_with_timestamps([last_token_fallback])}") 
tokens.append(last_token_fallback) segment_tokens[-1].append(last_token_fallback) attention_weights = [torch.cat(w, dim=-2) for w in segment_attweights] last_logprobs = chunk_logprobs[-1] else: attention_weights = [torch.cat(w[:-1], dim=-2) for w in segment_attweights] last_logprobs = chunk_logprobs[-2] # Check prediction of last token end_token = tokens[-1] if end_token >= tokenizer.timestamp_begin: start_token = tokens[0] assert start_token >= tokenizer.timestamp_begin # If Whisper prediction of the end is obviously wrong, we predict it again (constrained) if end_token <= start_token: end_token = last_logprobs[start_token+1:].argmax() + start_token + 1 tokens[-1] = end_token ws = perform_word_alignment( tokens, attention_weights, tokenizer, use_space=should_use_space(language), remove_punctuation_from_words=remove_punctuation_from_words, refine_whisper_precision_nframes=refine_whisper_precision_nframes, unfinished_decoding=unfinished_decoding, mfcc=mfcc, plot=plot_word_alignment, ) add_segment = len(ws) > 0 if add_segment: timestamped_word_segments.append(ws) else: logger.debug(f"Not added!") reset(add_segment, curr_tokens is not None and len(curr_tokens) == 1) i_start = get_index_begin_30sec_chunck(curr_tokens) # All segments from previous 30sec chunck have been collected if (i_start is not None and has_started): mfcc = new_mfcc # Get word confidence and/or check if previous segments shoud have been skipped should_skip = False if compute_word_confidence or no_speech_threshold is not None: # no voice activity check should_skip = (no_speech_prob > no_speech_threshold) if (no_speech_threshold is not None) else False if compute_word_confidence or (should_skip and logprob_threshold is not None): n = len(chunk_logprobs) if n == len(chunk_tokens_nosot): chunk_tokens_nosot = chunk_tokens_nosot[1:] if unfinished_decoding: assert last_token_fallback is not None last_tokens = [last_token_fallback] timestamped_word_segments[-1][-1]["avg_logprob_reliable"] = 
last_token_reliable n += 1 elif len(chunk_tokens_nosot) >= max_sample_len - 3: # there were segments in the 30sec chunck, and then the LM got stuck last_tokens = [torch.argmax(chunk_logprobs[-1]).item()] timestamped_word_segments[-1][-1]["avg_logprob_reliable"] = (temperature == 0) else: last_tokens = [tokenizer.eot] chunck_indices = chunk_tokens_nosot + last_tokens assert len(chunk_logprobs) == len(chunck_indices), f"{len(chunk_logprobs)} != {len(chunck_indices)}" logprobs = torch.cat([logprob[i].unsqueeze(0) for (logprob, i) in zip(chunk_logprobs, chunck_indices)]) assert min([p.isfinite().item() for p in logprobs]), \ f"Got infinite logprob among ({len(logprobs)}) {[(i, tokenizer.decode_with_timestamps([i]), v.item()) for (i,v) in zip(chunck_indices, logprobs)]}" sum_logprob = sum(logprobs) avg_logprob = sum_logprob/n # don't skip if the logprob is high enough, despite the no_speech_prob if avg_logprob > logprob_threshold: should_skip = False if should_skip: logger.debug(f"Skipping last {len(segment_tokens)-1-i_start} segments (no_speech_prob {no_speech_prob} > {no_speech_threshold} and avg_logprob {avg_logprob} < {logprob_threshold})") index_begin_30sec_chunck -= len(segment_tokens)-1-i_start segment_tokens = segment_tokens[:i_start] + [segment_tokens[-1]] timestamped_word_segments = timestamped_word_segments[:i_start] elif compute_word_confidence: avg_logprob = avg_logprob.item() i_token_end = -1 for i in range(i_start, len(segment_tokens)-1): tokens = segment_tokens[i] i_token_start = i_token_end + 1 i_token_end = i_token_start + len(tokens) assert chunck_indices[i_token_start:i_token_end] == tokens, f"Inconsistent token list {tokenizer.decode_with_timestamps(chunck_indices[i_token_start:i_token_end])} != {tokenizer.decode_with_timestamps(tokens)}" i_token_start += 1 # skip sos (start time) if not unfinished_decoding: i_token_end -= 1 # skip eos (end time) segment_logprobs.append(logprobs[i_token_start:i_token_end]) segment_avglogprobs.append(avg_logprob) 
else: for i in range(i_start, len(segment_tokens)-1): segment_logprobs.append(None) segment_avglogprobs.append(None) else: for i in range(i_start, len(segment_tokens)-1): segment_logprobs.append(None) segment_avglogprobs.append(None) if verbose_bugged and not should_skip: for segment in timestamped_word_segments[i_start:]: for word in segment: print_timestamped(word) # Reset counters chunk_tokens = [] chunk_tokens_nosot = [] chunk_logprobs = [] no_speech_prob = None def hook_attention_weights(layer, ins, outs, index): nonlocal segment_attweights # In old version of whisper, output is a single tensor assert isinstance(outs, tuple) and len(outs) == 2, "whisper seems to be outdated, please update it (pip install --upgrade --no-deps --force-reinstall git+https://github.com/openai/whisper.git)" w = outs[-1] # Only the last attention weights is useful if w.shape[-2] > 1: w = w[:, :, -1:, :] segment_attweights[index].append(w) def hook_mfcc(layer, ins, outs): nonlocal new_mfcc new_mfcc = ins[0] def hook_input_tokens(layer, ins, outs): nonlocal segment_tokens, sot_index, chunk_tokens, chunk_tokens_nosot, logit_filters, has_started, language, num_inference_steps num_inference_steps += 1 curr_tokens = ins[0] assert curr_tokens.shape[0] == 1, "Batch decoding is not supported" curr_tokens = curr_tokens.squeeze(0) if len(curr_tokens) > 1 or curr_tokens[0] == tokenizer.sot: chunk_prompt = curr_tokens.tolist() if not has_started and language is None: if len(curr_tokens) == 1: # English model language = "en" else: language = tokenizer.decode(curr_tokens[1:2])[2:-2] whisper_options["language"] = language if verbose and not whisper_options["verbose"] and len(curr_tokens) > 1: # Reproduce whisper verbose (2/2) print(f"Detected language: {whisper.tokenizer.LANGUAGES[language].title()}") sys.stdout.flush() logit_filters = get_logit_filters(model, whisper_options, prompt = chunk_prompt[1:-len(tokenizer.sot_sequence)]) may_flush_segment(curr_tokens) # Keep the last token only 
segment_tokens[-1].append(curr_tokens[-1].item()) # Get the index of the <|startoftranscript|> tokens (to get proba of silence later) if len(curr_tokens) > 1 or curr_tokens[0] == tokenizer.sot: has_started = True if no_speech_threshold is not None: sot_index = curr_tokens.tolist().index(tokenizer.sot) else: sot_index = None # Accumulate tokens if has_started: chunk_tokens.append(curr_tokens) if len(curr_tokens) == 1: chunk_tokens_nosot.append(curr_tokens[-1].item()) else: if verbose and not whisper_options["verbose"]: # Reproduce whisper verbose (1/2) print("Detecting language using up to the first 30 seconds. Use `--language` to specify the language") embedding_weights = None def hook_output_logits(layer, ins, outs): nonlocal no_speech_prob, chunk_logprobs, segment_tokens, chunk_tokens, embedding_weights, has_started if embedding_weights is None: embedding_weights = torch.transpose(model.decoder.token_embedding.weight, 0, 1).to(outs[0].dtype) # Get the probability of silence if sot_index is not None: logits = (outs[0][sot_index,:] @ embedding_weights).float() logits = logits.softmax(dim=-1) no_speech_prob = logits[tokenizer.no_speech].item() # Get the log-probabilities of tokens (we don't know yet which one will be chosen) if has_started: logits = (outs[0][-1:,:] @ embedding_weights).float() tokens = torch.cat(chunk_tokens).unsqueeze(0) for logit_filter in logit_filters: logit_filter.apply(logits, tokens) logits = F.log_softmax(logits.squeeze(0), dim=-1) chunk_logprobs.append(logits) try: # Add hooks to the model, to get tokens and attention weights on the fly all_hooks = [] all_hooks.append(model.encoder.conv1.register_forward_hook(hook_mfcc)) all_hooks.append(model.decoder.token_embedding.register_forward_hook(hook_input_tokens)) for i, block in enumerate(model.decoder.blocks): all_hooks.append( block.cross_attn.register_forward_hook( lambda layer, ins, outs, index=i: hook_attention_weights(layer, ins, outs, index)) ) if compute_word_confidence or 
no_speech_threshold is not None: all_hooks.append(model.decoder.ln.register_forward_hook(hook_output_logits)) transcription = model.transcribe(audio, **whisper_options) finally: # Remove hooks for hook in all_hooks: hook.remove() # Finalize (collect last segment) may_flush_segment() segment_tokens.pop(-1) token_special_idx = min(token_sot, token_eot) def filter_tokens(tokens): while len(tokens) and tokens[0] >= token_special_idx: tokens = tokens[1:] while len(tokens) and tokens[-1] >= token_special_idx: tokens = tokens[:-1] return tokens assert len(segment_tokens) == len(timestamped_word_segments), f"Inconsistent number of segments: tokens ({len(segment_tokens)}) != timestamped_word_segments ({len(timestamped_word_segments)})" assert len(segment_avglogprobs) == len(segment_tokens), f"Inconsistent number of segments: avg logprobs ({len(segment_avglogprobs)}) != tokens ({len(segment_tokens)})" assert len(segment_logprobs) == len(segment_tokens), f"Inconsistent number of segments: logprobs ({len(segment_logprobs)}) != tokens ({len(segment_tokens)})" whisper_segments = transcription["segments"] l1 = len(whisper_segments) l2 = len(timestamped_word_segments) if l1 != l2 and l1 != 0: logger.warning(f"Inconsistent number of segments: whisper_segments ({l1}) != timestamped_word_segments ({l2})") assert l1 == l2 or l1 == 0, f"Inconsistent number of segments: whisper_segments ({l1}) != timestamped_word_segments ({l2})" logger.debug("Compile results") words = [] for i, (segment, timestamped_words, token, avglogprob, logprobs) in enumerate(zip(whisper_segments, timestamped_word_segments, segment_tokens, segment_avglogprobs, segment_logprobs)): timestamped_tokens = filter_tokens(token) whisper_tokens = filter_tokens(segment["tokens"]) if timestamped_tokens != whisper_tokens: if len(timestamped_tokens) == len(whisper_tokens) + 1: logger.warn(f"An additional token was added on segment {i}") else: assert len(timestamped_tokens) < len(whisper_tokens) and timestamped_tokens == 
whisper_tokens[:len(timestamped_tokens)], \ f"Fatal Error: Got inconsistent text for segment {i}:\n({tokenizer.decode(timestamped_tokens)}) {timestamped_tokens}\n!=({len(whisper_tokens)}) \n{tokenizer.decode(whisper_tokens)}" logger.warn(f"Text had to be shortned on segment {i}:\n{tokenizer.decode(timestamped_tokens)}\n!=\n{tokenizer.decode(whisper_tokens)}") timestamped_words[-1]["avg_logprob_reliable"] = False offset = segment["seek"] * HOP_LENGTH / SAMPLE_RATE for timestamped_word in timestamped_words: timestamped_word["start"] += offset timestamped_word["end"] += offset timestamped_word["idx_segment"] = i if compute_word_confidence: if "avg_logprob_reliable" not in timestamped_words[-1] or timestamped_words[-1]["avg_logprob_reliable"]: if abs(segment["avg_logprob"] - avglogprob) >= 1e-2: logger.warn(f"Recomputed different logprob for segment {i}: {avglogprob} != {segment['avg_logprob']}") if include_punctuation_in_confidence: segment["confidence"] = round_confidence(logprobs.mean().exp().item()) else: logprobs_nopunc = [] i_end = 0 for timestamped_word in timestamped_words: i_start = i_end tokens = timestamped_word["tokens"] i_end += len(tokens) assert i_end <= len(logprobs), f"Fatal Error: Got out-of-bound index for segment {i}: {i_end} > {len(logprobs)}" if include_punctuation_in_confidence: word_logprobs = logprobs[i_start:i_end] else: tokens_str = [tokenizer.decode([t]) for t in tokens] while len(tokens_str) > 1 and tokens_str[-1][-1] in _punctuation: # Note: look at the last character of token, to take into account "...", "!!", etc. tokens_str = tokens_str[:-1] tokens = tokens[:-1] word_logprobs = logprobs[i_start:i_start + len(tokens)] logprobs_nopunc.append(word_logprobs) timestamped_word["confidence"] = round_confidence(word_logprobs.mean().exp().item()) if i_end != len(logprobs): logger.warn(f"Got inconsistent length for segment {i} ({len(logprobs)} != {i_end}). 
Some words have been ignored.") if not include_punctuation_in_confidence: logprobs_nopunc = torch.cat(logprobs_nopunc) segment["confidence"] = round_confidence(logprobs_nopunc.mean().exp().item()) words.extend(timestamped_words) return transcription, words
PypiClean
/Falmark-1.1.0-py3-none-any.whl/falmark4.py
"""Tkinter front-end that registers developers/clients in PostgreSQL and
matches clients with developers by budget, product and developer type."""

import tkinter as tk
from tkinter import messagebox
from tkinter import simpledialog
import psycopg2
import webbrowser

# Table name is a module constant (not user input), so interpolating it
# into SQL via f-strings below is safe; all VALUES go through parameters.
USER_DATA_TABLE = "user_data"

# Widget references populated by main(); module-level so the button
# callbacks can read the form.
entry_email = None
entry_budget = None
entry_product = None
entry_description = None
entry_developer_type = None


def get_user_input():
    """Read the current form contents.

    :returns: (email, budget, product, description, developer_type)
    :raises ValueError: if the budget field is not a valid number
    """
    email = entry_email.get()
    budget = float(entry_budget.get())
    product = entry_product.get()
    description = entry_description.get("1.0", tk.END).strip()
    developer_type = entry_developer_type.get()
    return email, budget, product, description, developer_type


def save_user_data(data):
    """Persist a list of user dicts into the user_data table.

    :param data: list of dicts with keys email/budget/product/description/
        developer_type
    """
    # NOTE(review): credentials are hard-coded; move them to configuration.
    conn = psycopg2.connect(
        host="127.0.0.1",
        port="5432",
        database="postgres",
        user="postgres",
        password="fender123"
    )
    cur = conn.cursor()
    cur.execute(f"CREATE TABLE IF NOT EXISTS {USER_DATA_TABLE} (email TEXT, budget FLOAT, product TEXT, description TEXT, developer_type TEXT);")
    for user in data:
        # Parameterized INSERT: user-supplied values never reach the SQL text.
        cur.execute(f"INSERT INTO {USER_DATA_TABLE} VALUES (%s, %s, %s, %s, %s);",
                    (user["email"], user["budget"], user["product"], user["description"], user["developer_type"]))
    conn.commit()
    cur.close()
    conn.close()


def load_user_data():
    """Load every row of the user_data table as a list of dicts."""
    conn = psycopg2.connect(
        host="127.0.0.1",
        port="5432",
        database="postgres",
        user="postgres",
        password="fender123"
    )
    cur = conn.cursor()
    cur.execute(f"SELECT * FROM {USER_DATA_TABLE};")
    data = cur.fetchall()
    cur.close()
    conn.close()
    users = []
    for user in data:
        users.append({
            "email": user[0],
            "budget": user[1],
            "product": user[2],
            "description": user[3],
            "developer_type": user[4]
        })
    return users


def match_developers(user_data):
    """Find rows whose budget is at least the user's and whose product and
    developer type match case-insensitively.

    :param user_data: dict with at least budget/product/developer_type keys
    :returns: list of matching user dicts
    """
    # NOTE(review): this connection uses user="owner" while the other
    # helpers use "postgres" — confirm this is intentional.
    conn = psycopg2.connect(
        host="127.0.0.1",
        port="5432",
        database="postgres",
        user="owner",
        password="fender123"
    )
    cur = conn.cursor()
    cur.execute(f"SELECT * FROM {USER_DATA_TABLE} WHERE budget >= %s AND LOWER(product) = LOWER(%s) AND LOWER(developer_type) = LOWER(%s);",
                (user_data["budget"], user_data["product"], user_data["developer_type"]))
    data = cur.fetchall()
    cur.close()
    conn.close()
    matching_developers = []
    for developer in data:
        matching_developers.append({
            "email": developer[0],
            "budget": developer[1],
            "product": developer[2],
            "description": developer[3],
            "developer_type": developer[4]
        })
    return matching_developers


def register_developer():
    """Button callback: store the form contents as a developer profile."""
    email, budget, product, description, developer_type = get_user_input()
    user_data = {
        "email": email,
        "budget": budget,
        "product": product,
        "description": description,
        "developer_type": developer_type
    }
    save_user_data([user_data])
    messagebox.showinfo("Registration Successful", "You have been registered as a freelance developer.")


def register_client():
    """Button callback: store the form contents as a client profile."""
    email, budget, product, description, developer_type = get_user_input()
    user_data = {
        "email": email,
        "budget": budget,
        "product": product,
        "description": description,
        "developer_type": developer_type
    }
    save_user_data([user_data])
    messagebox.showinfo("Registration Successful", "You have been registered as a client searching for a developer.")


def find_developer():
    """Button callback: search for matching profiles and offer to message one."""
    user_email, user_budget, user_product, user_description, user_developer_type = get_user_input()
    user_data = {
        "email": user_email,
        "budget": user_budget,
        "product": user_product,
        "description": user_description,
        # Bug fix: this key was "marketing_type", but match_developers()
        # reads user_data["developer_type"], so every search raised KeyError.
        "developer_type": user_developer_type
    }
    matching_developers = match_developers(user_data)
    if matching_developers:
        result = "Matching developers:\n"
        for developer in matching_developers:
            result += f"Email: {developer['email']}\n"
            result += f"Description: {developer['description']}\n"
            result += f"Marketing Type: {developer['developer_type']}\n\n"
        messagebox.showinfo("Match Found", result)
        selected_developer = simpledialog.askstring("Select developer", "Enter the email of the developer you want to message:")
        if selected_developer:
            message = simpledialog.askstring("Send Message", "Enter your message:")
            if message:
                messagebox.showinfo("Message Sent", "Your message has been sent.")
            else:
                messagebox.showinfo("Message Not Sent", "Please enter a message.")
    else:
        messagebox.showinfo("No Match", "No matching developer/client found.")


def view_profile():
    """Button callback: show the stored profile for the email in the form."""
    email = entry_email.get()
    users = load_user_data()
    for user in users:
        if user["email"] == email:
            profile = f"Email: {user['email']}\n"
            profile += f"Budget: {user['budget']}\n"
            profile += f"Product: {user['product']}\n"
            profile += f"Description: {user['description']}\n"
            profile += f"developer type: {user['developer_type']}"
            messagebox.showinfo("Profile", profile)
            break
    else:
        # for/else: runs only when no matching email was found.
        messagebox.showinfo("Profile", "No profile found for the provided email.")


def open_help_website():
    """Open the project help page in the default browser."""
    webbrowser.open("https://pencil13130.wixsite.com/falcon")


def main():
    """Build the Tk window, wire the callbacks and start the event loop."""
    global entry_email, entry_budget, entry_product, entry_description, entry_developer_type
    window = tk.Tk()
    window.title("falmark. instant clinets.")
    # Styling
    window.configure(bg="#F5F5F5")
    window.geometry("400x400")
    window.resizable(False, False)
    label_email = tk.Label(window, text="Email:", bg="#F5F5F5")
    label_email.grid(row=0, column=0, pady=5)
    entry_email = tk.Entry(window)
    entry_email.grid(row=0, column=1, pady=5)
    label_budget = tk.Label(window, text="Budget:", bg="#F5F5F5")
    label_budget.grid(row=1, column=0, pady=5)
    entry_budget = tk.Entry(window)
    entry_budget.grid(row=1, column=1, pady=5)
    label_product = tk.Label(window, text="your Project idea/service you provide:", bg="#F5F5F5")
    label_product.grid(row=2, column=0, pady=5)
    entry_product = tk.Entry(window)
    entry_product.grid(row=2, column=1, pady=5)
    label_description = tk.Label(window, text="Description:", bg="#F5F5F5")
    label_description.grid(row=3, column=0, pady=5)
    entry_description = tk.Text(window, height=4, width=20)
    entry_description.grid(row=3, column=1, pady=5)
    label_developer_type = tk.Label(window, text="Developer type:", bg="#F5F5F5")
    label_developer_type.grid(row=4, column=0, pady=5)
    entry_developer_type = tk.Entry(window)
    entry_developer_type.grid(row=4, column=1, pady=5)
    button_register_developer = tk.Button(window, text="Register as a developer", command=register_developer, width=20)
    button_register_developer.grid(row=5, column=0, pady=10)
    button_register_client = tk.Button(window, text="Register as a Client", command=register_client, width=20)
    button_register_client.grid(row=5, column=1, pady=10)
    button_find_developer = tk.Button(window, text="Find a developer or client", command=find_developer, width=20)
    button_find_developer.grid(row=6, column=0, columnspan=2, pady=10)
    button_view_profile = tk.Button(window, text="View Profile", command=view_profile, width=20)
    button_view_profile.grid(row=7, column=0, columnspan=2, pady=10)
    window.mainloop()


if __name__ == "__main__":
    main()
PypiClean
/GENDIS-1.0.14.tar.gz/GENDIS-1.0.14/gendis/other/example.ipynb
``` import numpy as np import pandas as pd %matplotlib inline import matplotlib.pyplot as plt import sys sys.path.append('..') from data.load_all_datasets import load_data_train_test from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score import warnings warnings.filterwarnings('ignore') np.random.seed(1337) # Random seed for reproducibility # Load in all datasets and sort them by complexity metadata = sorted( load_data_train_test(), key=lambda x: x['train']['n_samples']**2*x['train']['n_features']**3 # O(n**2 * m**3) ) dataset = metadata[0] # Take the dataset which is expected to take the least long # Read in the datafiles, split them into features and labels train_df = pd.read_csv(dataset['train']['data_path']) test_df = pd.read_csv(dataset['test']['data_path']) X_train = train_df.drop('target', axis=1) y_train = train_df['target'] X_test = test_df.drop('target', axis=1) y_test = test_df['target'] # Map the labels to the range [0, ..., C-1] with C the number of classes map_dict = {} for j, c in enumerate(np.unique(y_train)): map_dict[c] = j y_train = y_train.map(map_dict) y_test = y_test.map(map_dict) # Convert everything to numpy arrays X_train = X_train.values X_test = X_test.values y_train = y_train.values y_test = y_test.values ``` # 1. Brute-force algorithm ``` from brute_force import BruteForceExtractor bfe = BruteForceExtractor() # Let's extract shapelets of a specific length, else it would take quite a while... shapelets = bfe.extract(X_train, y_train, nr_shapelets=3, min_len=10, max_len=11) plt.figure(figsize=(10, 5)) for shap in shapelets: plt.plot(range(len(shap)), shap) plt.show() ``` # 2. Fast Shapelets (a faster brute-force algorithm) ``` from fast import FastExtractor fe = FastExtractor() shapelets = fe.extract(X_train, y_train, nr_shapelets=3, min_len=10, max_len=11) plt.figure(figsize=(10, 5)) for shap in shapelets: plt.plot(range(len(shap)), shap) plt.show() ``` # 3. 
SAX Shapelets (approximative algorithm with SAX representations) ``` from sax import SAXExtractor se = SAXExtractor() shapelets = se.extract(X_train, y_train, nr_shapelets=3, min_len=10, max_len=11) plt.figure(figsize=(10, 5)) for shap in shapelets: plt.plot(range(len(shap)), shap) plt.show() ``` # 4. Particle Swarm Optimization (a bio-inspired algorithm) ``` from pso import ParticleSwarmExtractor pse = ParticleSwarmExtractor() shapelets = pse.extract(X_train, y_train) plt.figure(figsize=(10, 5)) for shap in shapelets: plt.plot(range(len(shap)), shap) plt.show() ```
PypiClean
/JTdata-1.5.21-py3-none-any.whl/DataLoader/status.py
"""Local status-data providers: thin readers over per-table HDF5 files
(``all.h5``) stored under ``data_path``, covering index membership,
instrument listing/ST/suspension status and industry classification."""

import os
import abc
import pandas as pd
from datetime import datetime
from .config import data_path, stk_uiverse
from .tools import print_func_time, to_intdate

# Directory per source table; each is expected to contain an ``all.h5``
# with its data under the "data" key.
aindex_member = os.path.join(data_path, r'AIndexMembers')
aindex_membercitics = os.path.join(data_path, r'AIndexMembersCITICS')
aindex_altermember = os.path.join(data_path, r'AIndexAlternativeMembers')
ashare_description = os.path.join(data_path, r'AShareDescription')
ashare_st = os.path.join(data_path, r'AShareST')
ashare_suspension = os.path.join(data_path, r'AShareTradingSuspension')
ashare_isparticipant = os.path.join(data_path, r'AShareISParticipant')
suntime_typedict = os.path.join(data_path, r'RPT_RATING_COMPARE')
cfutures_contract_mapping = os.path.join(data_path, r'CFuturesContractMapping')
ashare_idu_citics = os.path.join(data_path, r"AShareIndustriesClass_CITICS")
ashare_idu_cs = os.path.join(data_path, r"AShareIndustriesClass_CS")
ashare_idu_gics = os.path.join(data_path, r"AShareIndustriesClass_GICS")
ashare_idu_sw = os.path.join(data_path, r'AShareIndustriesClass_SW')
ashare_idu_wind = os.path.join(data_path, r'AShareIndustriesClass_WIND')
ashare_idu_code = os.path.join(data_path, r'AShareIndustriesCode')


class BaseStatusInfoProvider(abc.ABC):
    """Abstract interface for status-data providers."""

    @abc.abstractmethod
    def get_status_data(self, instruments, fields, start_date, end_date):
        # NOTE(review): concrete subclasses below use different parameter
        # lists (datapath/indexcode/..., datapath/stkcodes/fields) than this
        # abstract signature — confirm whether the interface is meant to be
        # positional-only or should be aligned.
        raise NotImplementedError


class LocalIndexStatusProvider(BaseStatusInfoProvider):
    """ index, stockcode, indate, outdate"""

    def get_status_data(self, datapath, indexcode, start_date=None, end_date=None):
        """Read index-membership rows for ``indexcode`` from ``datapath``.

        Rows are filtered to those overlapping [start_date, end_date] when
        both bounds are given; open-ended memberships (NaN ``outdate``) are
        treated as ending today. Returns an empty DataFrame (after printing
        a notice) when the index is absent from the file.
        """
        df = pd.read_hdf(os.path.join(datapath, 'all.h5'), "data")
        try:
            df = df.loc[indexcode]
            # Missing outdate means "still a member": substitute today's
            # date (as yyyymmdd int) for the overlap comparison.
            dfcp = df['outdate'].fillna(int(datetime.now().strftime("%Y%m%d")))
            if (start_date is not None) & (end_date is not None):
                start_date, end_date = to_intdate(start_date), to_intdate(end_date)
                # Keep memberships whose [indate, outdate] intersects the
                # requested window.
                df = df.loc[(df.indate <= end_date) & (dfcp >= start_date)]
            return df
        except KeyError:
            print('Index %s info not found' % indexcode)
            return pd.DataFrame()

    @print_func_time
    def index_member(self, indexcode, start_date=None, end_date=None):
        """ AIndexMembers """
        return self.get_status_data(aindex_member, indexcode, start_date, end_date)

    @print_func_time
    def index_member_citics(self, indexcode, start_date=None, end_date=None):
        """ AIndexMembersCITICS """
        return self.get_status_data(aindex_membercitics, indexcode, start_date, end_date)

    @print_func_time
    def index_member_alternative(self, indexcode, start_date=None, end_date=None):
        """AIndexAlternativeMembers"""
        return self.get_status_data(aindex_altermember, indexcode, start_date, end_date)


class LocalInstStatusProvider(BaseStatusInfoProvider):
    """ index, stockcode, indate, outdate"""

    def get_status_data(self, datapath, stkcodes, fields=None):
        """Read per-instrument status rows, optionally restricted to
        ``stkcodes`` (str or list) and to ``fields`` (column subset);
        all-NaN rows are dropped."""
        path = os.path.join(datapath, 'all.h5')
        df = pd.read_hdf(path, 'data')
        if stkcodes:
            if isinstance(stkcodes, str):
                stkcodes = [stkcodes, ]
            df = df.loc[df.index.isin(stkcodes)]
        if fields:
            df = df[fields]
        df = df.dropna(how='all', axis=0)
        return df

    @print_func_time
    def list_instrument(self, univ, start_date, end_date):
        """List instruments in universe ``univ`` alive during the window.

        ``univ == 'all'`` reads AShareDescription and keeps instruments
        whose [listdate, delistdate] overlaps the window (missing
        delistdate treated as today); otherwise ``univ`` is mapped through
        ``stk_uiverse`` to an index code and membership is used instead.
        """
        start_date, end_date = to_intdate(start_date), to_intdate(end_date)
        if univ == 'all':
            path = os.path.join(ashare_description, 'all.h5')
            df = pd.read_hdf(path, 'data')
            dfcp = df['delistdate'].fillna(int(datetime.now().strftime("%Y%m%d")))
            if (start_date is not None) & (end_date is not None):
                # NOTE(review): dates were already int-converted above; this
                # second to_intdate pass is redundant (harmless if
                # to_intdate is idempotent — confirm).
                start_date, end_date = to_intdate(start_date), to_intdate(end_date)
                df = df.loc[(df.listdate <= end_date) & (dfcp >= start_date)]
            return df
        else:
            univ = stk_uiverse.get(univ)
            IdxSP = LocalIndexStatusProvider()
            return IdxSP.index_member(univ, start_date, end_date).set_index("stockcode")

    @print_func_time
    def ashare_ipodate(self, stkcode, fields=None):
        """AShareDescription: listing/delisting information."""
        return self.get_status_data(ashare_description, stkcode, fields)

    @print_func_time
    def ashare_st(self, stkcode, fields=None):
        """AShareST: special-treatment (ST) status records."""
        # ``ashare_st`` here resolves to the module-level path constant,
        # not this method (method names do not shadow globals in the body).
        return self.get_status_data(ashare_st, stkcode, fields)

    @print_func_time
    def ashare_suspension(self, stkcode, fields=None):
        """AShareTradingSuspension: trading-suspension records."""
        return self.get_status_data(ashare_suspension, stkcode, fields)

    @print_func_time
    def ashare_isparticipant(self, stkcode, fields=None):
        """AShareISParticipant records."""
        return self.get_status_data(ashare_isparticipant, stkcode, fields)

    @print_func_time
    def suntime_typedict(self, organcode, fields=None):
        """RPT_RATING_COMPARE"""
        return self.get_status_data(suntime_typedict, organcode, fields)

    @print_func_time
    def cfutures_contract_mapping(self, organcode, fields=None):
        """CFUTURESCONTRACTMAPPING"""
        return self.get_status_data(cfutures_contract_mapping, organcode, fields)


class LocalIndustryMemberProvider:
    """Readers for industry-classification membership tables."""

    def get_indexmember(self, datapath, stockcode=None, level=None):
        """ indexmenber reader """
        # When ``level`` is given, keep only that classification column plus
        # the entry/remove dates; when ``stockcode`` is given, select its rows.
        path = os.path.join(datapath, 'all.h5')
        df = pd.read_hdf(path, 'data')
        if level:
            df = df[[level, "entry_date", "remove_date"]]
        if stockcode:
            df = df.loc[stockcode]
        return df

    @print_func_time
    def Industrycompo_citics(self, stockcode=None, level=None):
        """CITICS industry classification membership."""
        return self.get_indexmember(ashare_idu_citics, stockcode, level)

    @print_func_time
    def Industrycompo_sw(self, stockcode=None, level=None):
        """Shenwan (SW) industry classification membership."""
        return self.get_indexmember(ashare_idu_sw, stockcode, level)

    @print_func_time
    def Industrycompo_cs(self, stockcode=None, level=None):
        """CS industry classification membership."""
        return self.get_indexmember(ashare_idu_cs, stockcode, level)

    @print_func_time
    def Industrycompo_gics(self, stockcode=None, level=None):
        """GICS industry classification membership."""
        return self.get_indexmember(ashare_idu_gics, stockcode, level)

    @print_func_time
    def Industrycompo_wind(self, stockcode=None, level=None):
        """Wind industry classification membership."""
        return self.get_indexmember(ashare_idu_wind, stockcode, level)

    @print_func_time
    def Industrycodes(self):
        """Full AShareIndustriesCode table."""
        return pd.read_hdf(os.path.join(ashare_idu_code, "all.h5"), "data")
/Heterogeneous_Highway_Env-0.0.3-py3-none-any.whl/Heteogeneous_Highway_Env/envs/common/graphics.py
import os
from typing import TYPE_CHECKING, Callable, List, Optional
import numpy as np
import pygame

from highway_env.envs.common.action import ActionType, DiscreteMetaAction, ContinuousAction
from highway_env.road.graphics import WorldSurface, RoadGraphics
from highway_env.vehicle.graphics import VehicleGraphics

if TYPE_CHECKING:
    from highway_env.envs import AbstractEnv
    from highway_env.envs.common.abstract import Action


class EnvViewer(object):

    """A viewer to render a highway driving environment."""

    SAVE_IMAGES = False

    def __init__(self, env: 'AbstractEnv', config: Optional[dict] = None) -> None:
        self.env = env
        self.config = config or env.config
        self.offscreen = self.config["offscreen_rendering"]

        pygame.init()
        pygame.display.set_caption("Highway-env")
        panel_size = (self.config["screen_width"], self.config["screen_height"])

        # A display is not mandatory to draw things. Ignoring the display.set_mode()
        # instruction allows the drawing to be done on surfaces without
        # handling a screen display, useful for e.g. cloud computing
        if not self.offscreen:
            self.screen = pygame.display.set_mode([self.config["screen_width"], self.config["screen_height"]])
        self.sim_surface = WorldSurface(panel_size, 0, pygame.Surface(panel_size))
        self.sim_surface.scaling = self.config.get("scaling", self.sim_surface.INITIAL_SCALING)
        self.sim_surface.centering_position = self.config.get("centering_position", self.sim_surface.INITIAL_CENTERING)
        self.clock = pygame.time.Clock()

        self.enabled = True
        # A dummy SDL video driver means no real display is available.
        if os.environ.get("SDL_VIDEODRIVER", None) == "dummy":
            self.enabled = False

        self.observer_vehicle = None
        self.agent_display = None
        self.agent_surface = None
        self.vehicle_trajectory = None
        self.frame = 0
        self.directory = None

    def set_agent_display(self, agent_display: Callable) -> None:
        """
        Set a display callback provided by an agent

        So that they can render their behaviour on a dedicated agent surface, or even on the simulation surface.

        :param agent_display: a callback provided by the agent to display on surfaces
        """
        if self.agent_display is None:
            # The first registration doubles the window so the agent gets its
            # own panel next to (or below) the simulation panel.
            if not self.offscreen:
                if self.config["screen_width"] > self.config["screen_height"]:
                    self.screen = pygame.display.set_mode((self.config["screen_width"],
                                                           2 * self.config["screen_height"]))
                else:
                    self.screen = pygame.display.set_mode((2 * self.config["screen_width"],
                                                           self.config["screen_height"]))
            self.agent_surface = pygame.Surface((self.config["screen_width"], self.config["screen_height"]))
        self.agent_display = agent_display

    def set_agent_action_sequence(self, actions: List['Action']) -> None:
        """
        Set the sequence of actions chosen by the agent, so that it can be displayed

        :param actions: list of action, following the env's action space specification
        """
        if isinstance(self.env.action_type, DiscreteMetaAction):
            # Convert action indexes to labels before predicting a trajectory.
            actions = [self.env.action_type.actions[a] for a in actions]
        if len(actions) > 1:
            self.vehicle_trajectory = self.env.vehicle.predict_trajectory(
                actions,
                1 / self.env.config["policy_frequency"],
                1 / 3 / self.env.config["policy_frequency"],
                1 / self.env.config["simulation_frequency"])

    def handle_events(self) -> None:
        """Handle pygame events by forwarding them to the display and environment vehicle."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.env.close()
            self.sim_surface.handle_event(event)
            if self.env.action_type:
                EventHandler.handle_event(self.env.action_type, event)

    def display(self) -> None:
        """Display the road and vehicles on a pygame window."""
        if not self.enabled:
            return

        self.sim_surface.move_display_window_to(self.window_position())
        RoadGraphics.display(self.env.road, self.sim_surface)

        if self.vehicle_trajectory:
            VehicleGraphics.display_trajectory(
                self.vehicle_trajectory,
                self.sim_surface,
                offscreen=self.offscreen)

        RoadGraphics.display_road_objects(
            self.env.road,
            self.sim_surface,
            offscreen=self.offscreen
        )

        if self.agent_display:
            self.agent_display(self.agent_surface, self.sim_surface)
            if not self.offscreen:
                # Blit the agent panel beside or below the simulation panel,
                # depending on the window orientation chosen in set_agent_display.
                if self.config["screen_width"] > self.config["screen_height"]:
                    self.screen.blit(self.agent_surface, (0, self.config["screen_height"]))
                else:
                    self.screen.blit(self.agent_surface, (self.config["screen_width"], 0))

        RoadGraphics.display_traffic(
            self.env.road,
            self.sim_surface,
            simulation_frequency=self.env.config["simulation_frequency"],
            offscreen=self.offscreen)

        ObservationGraphics.display(self.env.observation_type, self.sim_surface)

        if not self.offscreen:
            self.screen.blit(self.sim_surface, (0, 0))
            if self.env.config["real_time_rendering"]:
                self.clock.tick(self.env.config["simulation_frequency"])
            pygame.display.flip()

        if self.SAVE_IMAGES and self.directory:
            pygame.image.save(self.sim_surface, str(self.directory / "highway-env_{}.png".format(self.frame)))
            self.frame += 1

    def get_image(self) -> np.ndarray:
        """
        The rendered image as a rgb array.

        OpenAI gym's channel convention is H x W x C
        """
        surface = self.screen if self.config["render_agent"] and not self.offscreen else self.sim_surface
        data = pygame.surfarray.array3d(surface)  # in W x H x C channel convention
        return np.moveaxis(data, 0, 1)

    def window_position(self) -> np.ndarray:
        """the world position of the center of the displayed window."""
        if self.observer_vehicle:
            return self.observer_vehicle.position
        elif self.env.vehicle:
            return self.env.vehicle.position
        else:
            return np.array([0, 0])

    def close(self) -> None:
        """Close the pygame window."""
        pygame.quit()


class EventHandler(object):
    @classmethod
    def handle_event(cls, action_type: ActionType, event: pygame.event.EventType) -> None:
        """
        Map the pygame keyboard events to control decisions

        :param action_type: the ActionType that defines how the vehicle is controlled
        :param event: the pygame event
        """
        if isinstance(action_type, DiscreteMetaAction):
            cls.handle_discrete_action_event(action_type, event)
        elif action_type.__class__ == ContinuousAction:
            cls.handle_continuous_action_event(action_type, event)

    @classmethod
    def handle_discrete_action_event(cls, action_type: DiscreteMetaAction, event: pygame.event.EventType) -> None:
        """Trigger a discrete meta-action on arrow-key presses."""
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_RIGHT and action_type.longitudinal:
                action_type.act(action_type.actions_indexes["FASTER"])
            if event.key == pygame.K_LEFT and action_type.longitudinal:
                action_type.act(action_type.actions_indexes["SLOWER"])
            if event.key == pygame.K_DOWN and action_type.lateral:
                action_type.act(action_type.actions_indexes["LANE_RIGHT"])
            # Guard on `lateral` like the K_DOWN branch above: when lateral
            # actions are disabled, "LANE_LEFT" is absent from actions_indexes
            # and the lookup would raise KeyError.
            if event.key == pygame.K_UP and action_type.lateral:
                action_type.act(action_type.actions_indexes["LANE_LEFT"])

    @classmethod
    def handle_continuous_action_event(cls, action_type: ContinuousAction, event: pygame.event.EventType) -> None:
        """Map arrow-key press/release events to a continuous (acceleration, steering) action."""
        action = action_type.last_action.copy()
        # Steering is the last component of the action space.
        steering_index = action_type.space().shape[0] - 1
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_RIGHT and action_type.lateral:
                action[steering_index] = 0.7
            if event.key == pygame.K_LEFT and action_type.lateral:
                action[steering_index] = -0.7
            if event.key == pygame.K_DOWN and action_type.longitudinal:
                action[0] = -0.7
            if event.key == pygame.K_UP and action_type.longitudinal:
                action[0] = 0.7
        elif event.type == pygame.KEYUP:
            if event.key == pygame.K_RIGHT and action_type.lateral:
                action[steering_index] = 0
            if event.key == pygame.K_LEFT and action_type.lateral:
                action[steering_index] = 0
            if event.key == pygame.K_DOWN and action_type.longitudinal:
                action[0] = 0
            if event.key == pygame.K_UP and action_type.longitudinal:
                action[0] = 0
        action_type.act(action)


class ObservationGraphics(object):
    COLOR = (0, 0, 0)

    @classmethod
    def display(cls, obs, sim_surface) -> None:
        """Render the observation overlay (only lidar observations are drawn)."""
        from highway_env.envs.common.observation import LidarObservation
        if isinstance(obs, LidarObservation):
            cls.display_grid(obs, sim_surface)

    @classmethod
    def display_grid(cls, lidar_observation, surface) -> None:
        """Draw the lidar range polygon around the observer's origin."""
        # One ray angle per grid cell, duplicated so consecutive points share
        # an angle and the polygon edges follow the measured ranges.
        psi = np.repeat(np.arange(-lidar_observation.angle / 2,
                                  2 * np.pi - lidar_observation.angle / 2,
                                  2 * np.pi / lidar_observation.grid.shape[0]), 2)
        psi = np.hstack((psi[1:], [psi[0]]))
        r = np.repeat(np.minimum(lidar_observation.grid[:, 0], lidar_observation.maximum_range), 2)
        points = [(surface.pos2pix(lidar_observation.origin[0] + r[i] * np.cos(psi[i]),
                                   lidar_observation.origin[1] + r[i] * np.sin(psi[i])))
                  for i in range(np.size(psi))]
        pygame.draw.lines(surface, ObservationGraphics.COLOR, True, points, 1)
PypiClean
/Docassemble-Flask-User-0.6.28.tar.gz/Docassemble-Flask-User-0.6.28/README.rst
Flask-User v0.6 =============== Modified for Docassemble. .. attention:: The documentation has moved to http://flask-user.readthedocs.io/en/v0.6 User Authentication and Management ---------------------------------- | So, you're writing a Flask web application and would like to authenticate your users. | You start with a simple **Login** page, but soon enough you'll need to handle: * **Registrations** and **Email Confirmations** * **Change Usernames**, **Change Passwords**, and **Forgotten Passwords** And wouldn't it be nice to also offer: * **Added security** * **Increased reliability** * **Role-based Authorization** * **Internationalization** * **Support for multiple emails per user** | Flask-User offers these features and more. Customizable, yet Ready to use ------------------------------ * **Largely Configurable** -- By overriding configuration settings. * **Almost fully Customizable** -- By overriding functions and properties. * **Ready to use** -- Through sensible defaults. * Supports **SQL Databases** and **MongoDB Databases**. * **Event hooking** -- Through efficient signals. Secure and Reliable ------------------- * **Secure** -- Built on top of widely deployed Passlib, PyCrypto, ItsDangerous. 
* **Reliable** -- Code coverage of over 90% * **Available** -- Tested on Python 2.6, 2.7 and 3.3-3.6 Well documented --------------- - `Flask-User v0.6 documentation <http://flask-user.readthedocs.io/en/v0.6/>`_ - `Flask-User v0.5 documentation <http://flask-user.readthedocs.io/en/v0.5/>`_ Comes with translations ----------------------- Chinese, Dutch, English, Farsi, Finnish, French, German, Italian, Russian, Spanish, Swedish, and Turkish Alternatives ------------ * `Flask-Login <https://flask-login.readthedocs.org/en/latest/>`_ * `Flask-Security <https://pythonhosted.org/Flask-Security/>`_ Authors ------- | **Lead developer and Maintainer** | Ling Thio -- ling.thio AT gmail DOT com | | **Contributors** | `Many contributors <https://github.com/lingthio/Flask-User/graphs/contributors>`_
PypiClean
/Djaloha-0.4.2.tar.gz/Djaloha-0.4.2/djaloha/static/aloha.0.20/lib/aloha/sidebar.js
* Aloha Editor is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version.* * * Aloha Editor is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ /** * @todo: - Make the sidebars resizable using drag handles * - Make overlayPage setting settable from external config */ define( [ 'aloha/core', 'aloha/jquery', 'aloha/selection' // 'aloha/plugin' // For when we plugify sidebar ], function ( Aloha, jQuery, Selection, Plugin ) { var $ = jQuery; var undefined = void 0; // Pseudo-namespace prefix for Sidebar elements // Rational: // We use a prefix instead of an enclosing class or id because we need to // be paranoid of unintended style inheritance in an environment like the // one in which Aloha-Editor operates in, with its numerous custom plugins. // eg: .inner or .btn can be used in several plugins, with eaching adding // to the class styles properties that we don't want. 
var ns = 'aloha-sidebar'; var uid = +( new Date ); // namespaced classnames var nsClasses = { 'bar' : nsClass( 'bar' ), 'handle' : nsClass( 'handle' ), 'inner' : nsClass( 'inner' ), 'panels' : nsClass( 'panels' ), 'config-btn' : nsClass( 'config-btn' ), 'handle-icon' : nsClass( 'handle-icon' ), 'panel-content' : nsClass( 'panel-content' ), 'panel-content-inner' : nsClass( 'panel-content-inner' ), 'panel-content-inner-text' : nsClass( 'panel-content-inner-text' ), 'panel-title' : nsClass( 'panel-title' ), 'panel-title-arrow' : nsClass( 'panel-title-arrow' ), 'panel-title-text' : nsClass( 'panel-title-text' ) }; // Extend jQuery easing animations if ( !jQuery.easing.easeOutExpo ) { jQuery.extend(jQuery.easing, { easeOutExpo: function (x, t, b, c, d) { return (t==d)?b+c:c*(-Math.pow(2,-10*t/d)+1)+b; }, easeOutElastic: function (x, t, b, c, d) { var m=Math,s=1.70158,p=0,a=c; if(!t)return b; if((t/=d)==1)return b+c; if(!p)p=d*.3; if(a<m.abs(c)){a=c;var s=p/4;}else var s=p/(2*m.PI)*m.asin(c/a); return a*m.pow(2,-10*t)*m.sin((t*d-s)*(2*m.PI)/p)+c+b; } }); } // ------------------------------------------------------------------------ // Local (helper) functions // ------------------------------------------------------------------------ /** * Simple templating * * @param {String} str - The string containing placeholder keys in curly * brackets * @param {Object} obj - Associative array of replacing placeholder keys * with corresponding values */ function supplant ( str, obj ) { return str.replace( /\{([a-z0-9\-\_]+)\}/ig, function ( str, p1, offset, s ) { var replacement = obj[ p1 ] || str; return ( typeof replacement == 'function' ) ? replacement() : replacement; } ); }; /** * Wrapper to call the supplant method on a given string, taking the * nsClasses object as the associative array containing the replacement * pairs * * @param {String} str * @return {String} */ function renderTemplate ( str ) { return ( typeof str == 'string' ) ? 
supplant( str, nsClasses ) : str; }; /** * Generates a selector string with this plugins's namespace prefixed the * each classname * * Usage: * nsSel('header,', 'main,', 'foooter ul') * will return * ".aloha-myplugin-header, .aloha-myplugin-main, .aloha-mypluzgin-footer ul" * * @return {String} */ function nsSel () { var strBldr = [], prx = ns; jQuery.each( arguments, function () { strBldr.push( '.' + ( this == '' ? prx : prx + '-' + this ) ); } ); return jQuery.trim( strBldr.join( ' ' ) ); }; /** * Generates a string with this plugins's namespace prefixed the each * classname * * Usage: * nsClass('header', 'innerheaderdiv') * will return * "aloha-myplugin-header aloha-myplugin-innerheaderdiv" * * @return {String} */ function nsClass () { var strBldr = [], prx = ns; jQuery.each( arguments, function () { strBldr.push( this == '' ? prx : prx + '-' + this ); } ); return jQuery.trim( strBldr.join(' ') ); }; // ------------------------------------------------------------------------ // Sidebar constructor // Only instance properties are to be defined here // ------------------------------------------------------------------------ var Sidebar = function Sidebar ( opts ) { var sidebar = this; this.id = nsClass( ++uid ); this.panels = {}; this.container = jQuery( renderTemplate( '<div class="{bar}">' + '<div class="{handle}">' + '<span class="{handle-icon}"></span>' + '</div>' + '<div class="{inner}">' + '<ul class="{panels}"></ul>' + '</div>' + '</div>' ) ); // defaults this.width = 300; this.opened = false; this.isOpen = false; this.settings = { // We automatically set this to true when we are in IE, where rotating // elements using filters causes undesirable rendering ugliness. // Our solution is to fallback to swapping icon images. // We set this as a sidebar property so that it can overridden by // whoever thinks they are smarter than we are. 
rotateIcons : !jQuery.browser.msie, overlayPage : true }; // Initialize after dom is ready jQuery( function () { if ( !( ( typeof Aloha.settings.sidebar != 'undefined' ) && Aloha.settings.sidebar.disabled ) ) { sidebar.init( opts ); } } ); }; // ------------------------------------------------------------------------ // Sidebar prototype // All properties to be shared across Sidebar instances can be placed in // the prototype object // ------------------------------------------------------------------------ jQuery.extend(Sidebar.prototype, { // Build as much of the sidebar as we can before appending it to DOM to // minimize reflow. init: function (opts) { var that = this; var panels; // Pluck panels list from opts if (typeof opts == 'object') { panels = opts.panels; delete opts.panels; } // Copy any implements, and overrides in opts to this Sidebar instance jQuery.extend(this, opts); if (typeof panels == 'object') { jQuery.each(panels, function () { that.addPanel(this, true); }); } var bar = this.container; if (this.position == 'right') { bar.addClass(nsClass('right')); } // Place the bar into the DOM bar.hide() .appendTo(jQuery('body')) .click(function () {that.barClicked.apply(that, arguments);}) .find(nsSel('panels')).width(this.width); // IE7 needs us to explicitly set the container width, since it is // unable to determine it on its own bar.width(this.width); this.width = bar.width(); jQuery(window).resize(function () { that.updateHeight(); }); this.updateHeight(); this.roundCorners(); this.initToggler(); this.container.css(this.position == 'right' ? 
'marginRight' : 'marginLeft', -this.width); if (this.opened) { this.open(0); } this.toggleHandleIcon(this.isOpen); this.subscribeToEvents(); jQuery(window).resize(function () { that.correctHeight(); }); this.correctHeight(); }, show: function () { this.container.css( 'display', 'block' ); //.animate({opacity: 1}, 1000); return this; }, hide: function () { this.container.css( 'display','none' ); // .animate({opacity: 0}, 1000, function () { // jQuery(this).css('display', 'block') // }); return this; }, /** * Determines the effective elements at the current selection. * Iterates through all panels and checks whether the panel should be * activated for any of the effective elements in the selection. * * @param {Object} range - The Aloha.RangeObject */ checkActivePanels: function( range ) { var effective = []; if ( typeof range != 'undefined' && typeof range.markupEffectiveAtStart != 'undefined' ) { var l = range.markupEffectiveAtStart.length; for ( var i = 0; i < l; ++i ) { effective.push( jQuery( range.markupEffectiveAtStart[ i ] ) ); } } var that = this; jQuery.each( this.panels, function () { that.showActivePanel( this, effective ); } ); this.correctHeight(); }, subscribeToEvents: function () { var that = this; var $container = this.container; Aloha.bind( 'aloha-selection-changed', function( event, range ) { that.checkActivePanels( range ); } ); $container.mousedown( function( e ) { e.originalEvent.stopSelectionUpdate = true; Aloha.eventHandled = true; //e.stopSelectionUpdate = true; } ); $container.mouseup( function ( e ) { e.originalEvent.stopSelectionUpdate = true; Aloha.eventHandled = false; } ); Aloha.bind( 'aloha-editable-deactivated', function ( event, params ) { that.checkActivePanels(); } ); }, /** * Dynamically set appropriate heights for panels. * The height for each panel is determined by the amount of space that * is available in the viewport and the number of panels that need to * share that space. 
*/ correctHeight: function () { var height = this.container.find(nsSel('inner')).height() - (15 * 2); var panels = []; jQuery.each(this.panels, function () { if (this.isActive) { panels.push(this); } }); if (panels.length == 0) { return; } var remainingHeight = height - ((panels[0].title.outerHeight() + 10) * panels.length); var panel; var targetHeight; var panelInner; var panelText; var undone; var toadd = 0; var math = Math; // Local reference for quicker lookup while (panels.length > 0 && remainingHeight > 0) { remainingHeight += toadd; toadd = 0; undone = []; for (var j = panels.length - 1; j >= 0; --j) { panel = panels[j]; panelInner = panel.content.find(nsSel('panel-content-inner')); targetHeight = math.min( panelInner.height('auto').height(), math.floor(remainingHeight / (j + 1)) ); panelInner.height(targetHeight); remainingHeight -= targetHeight; panelText = panelInner.find(nsSel('panel-content-inner-text')); if (panelText.height() > targetHeight) { undone.push(panel); toadd += targetHeight; panelInner.css({ 'overflow-x': 'hidden', 'overflow-y': 'scroll' }); } else { panelInner.css('overflow-y', 'hidden'); } if (panel.expanded) { panel.expand(); } } panels = undone; } }, /** * Checks whether this panel should be activated (ie: made visible) for * any of the elements specified in a given list of elements. 
* * We have to add a null object to the list of elements to allow us to * check whether the panel should be visible when we have no effective * elements in the current selection * * @param {Object} panel - The Panel object we will test * @param {Array} elements - The effective elements (jQuery), any of * which may activate the panel */ showActivePanel: function (panel, elements) { elements.push(null); var j = elements.length; var count = 0; var li = panel.content.parent('li'); var activeOn = panel.activeOn; var effective = jQuery(); for (var i = 0; i < j; ++i) { if (activeOn(elements[i])) { ++count; if (elements[i]) { jQuery.merge(effective, elements[i]); } } } if (count) { panel.activate(effective); } else { panel.deactivate(); } this.roundCorners(); }, /** * Sets up the functionality, event listeners, and animation of the * sidebar handle */ initToggler: function () { var that = this; var bar = this.container; var icon = bar.find(nsSel('handle-icon')); var toggledClass = nsClass('toggled'); var bounceTimer; var isRight = (this.position == 'right'); if (this.opened) { this.rotateHandleArrow(isRight ? 0 : 180, 0); } // configure the position of the sidebar handle jQuery( function () { if ( typeof Aloha.settings.sidebar != 'undefined' && Aloha.settings.sidebar.handle && Aloha.settings.sidebar.handle.top ) { jQuery(bar.find(nsSel('handle'))).get(0).style.top = Aloha.settings.sidebar.handle.top; } } ); bar.find(nsSel('handle')) .click(function () { if (bounceTimer) { clearInterval(bounceTimer); } icon.stop().css('marginLeft', 4); if (that.isOpen) { jQuery(this).removeClass(toggledClass); that.close(); that.isOpen = false; } else { jQuery(this).addClass(toggledClass); that.open(); that.isOpen = true; } }).hover( function () { var flag = that.isOpen ? -1 : 1; if (bounceTimer) { clearInterval(bounceTimer); } icon.stop(); jQuery(this).stop().animate( isRight ? 
{marginLeft: '-=' + (flag * 5)} : {marginRight: '-=' + (flag * 5)}, 200 ); bounceTimer = setInterval(function () { flag *= -1; icon.animate( isRight ? {left: '-=' + (flag * 4)} : {right: '-=' + (flag * 4)}, 300 ); }, 300); }, function () { if (bounceTimer) { clearInterval(bounceTimer); } icon.stop().css(isRight ? 'left' : 'right', 5); jQuery(this).stop().animate( isRight ? {marginLeft: 0} : {marginRight: 0}, 600, 'easeOutElastic' ); } ); }, /** * Rounds the top corners of the first visible panel, and the bottom * corners of the last visible panel elements in the panels ul list */ roundCorners: function () { var bar = this.container; var lis = bar.find(nsSel('panels>li:not(', 'deactivated)')); var topClass = nsClass('panel-top'); var bottomClass = nsClass('panel-bottom'); bar.find(nsSel('panel-top,', 'panel-bottom')) .removeClass(topClass) .removeClass(bottomClass); lis.first().find(nsSel('panel-title')).addClass(topClass); lis.last().find(nsSel('panel-content')).addClass(bottomClass); }, /** * Updates the height of the inner div of the sidebar. This is done * whenever the viewport is resized */ updateHeight: function () { var h = jQuery(window).height(); this.container.height(h).find(nsSel('inner')).height(h); }, /** * Delegate all sidebar onclick events to the container. 
* Then use handleBarclick method until we bubble up to the first * significant element that we can interact with */ barClicked: function (ev) { this.handleBarclick(jQuery(ev.target)); }, /** * We handle all click events on the sidebar from here--dispatching * calls to which ever methods that should be invoked for the each * interaction */ handleBarclick: function (el) { if (el.hasClass(nsClass('panel-title'))) { this.togglePanel(el); } else if (el.hasClass(nsClass('panel-content'))) { // Aloha.Log.log('Content clicked'); } else if (el.hasClass(nsClass('handle'))) { // Aloha.Log.log('Handle clicked'); } else if (el.hasClass(nsClass('bar'))) { // Aloha.Log.log('Sidebar clicked'); } else { this.handleBarclick(el.parent()); } }, getPanelById: function (id) { return this.panels[id]; }, getPanelByElement: function (el) { var li = (el[0].tagName == 'LI') ? el : el.parent('li'); return this.getPanelById(li[0].id); }, togglePanel: function (el) { this.getPanelByElement(el).toggle(); }, /** * Animation to rotate the sidebar arrow * * @param {Number} angle - The angle two which the arrow should rotate * (0 or 180) * @param {Number|String} duration - (Optional) How long the animation * should play for */ rotateHandleIcon: function (angle, duration) { var arr = this.container.find(nsSel('handle-icon')); arr.animate({angle: angle}, { duration : (typeof duration == 'number' || typeof duration == 'string') ? duration : 500, easing : 'easeOutExpo', step : function (val, fx) { arr.css({ '-o-transform' : 'rotate(' + val + 'deg)', '-webkit-transform' : 'rotate(' + val + 'deg)', '-moz-transform' : 'rotate(' + val + 'deg)', '-ms-transform' : 'rotate(' + val + 'deg)' // We cannot use Microsoft Internet Explorer filters // because Microsoft Internet Explore 8 does not support // Microsoft Internet Explorer filters correctly. 
It // breaks the layout // filter : 'progid:DXImageTransform.Microsoft.BasicImage(rotation=' + (angle / 90) + ')' }); } }); }, /** * Sets the handle icon to the "i am opened, click me to close the * sidebar" state, or vice versa. The direction of the arrow depends * on whether the sidebar is on the left or right, and whether it is * in an opened state or not. * * Question: * Given that the arrow icon is by default pointing right, should * we make it point left? * * Answer: * isRight & isOpen : no * isRight & isClosed : yes * isLeft & isOpen : yes * isLeft & isClosed : no * * Truth table: * isRight | isOpen | XOR * ---------+--------+----- * T | T | F * T | F | T * F | T | T * F | F | F * * Therefore: * isPointingLeft = isRight XOR isOpen * * @param {Boolean} isOpened - Whether or not the sidebar is in the * opened state */ toggleHandleIcon: function (isOpen) { var isPointingLeft = (this.position == 'right') ^ isOpen; if (this.settings.rotateIcons) { this.rotateHandleIcon(isPointingLeft ? 180 : 0, 0); } else { var icon = this.container.find(nsSel('handle-icon')); if (isPointingLeft) { icon.addClass(nsClass('handle-icon-left')); } else { icon.removeClass(nsClass('handle-icon-left')); } } }, /** * Slides the sidebar into view */ open: function (duration, callback) { if (this.isOpen) { return this; } var isRight = (this.position == 'right'); var anim = isRight ? {marginRight: 0} : {marginLeft: 0}; this.toggleHandleIcon(true); this.container.animate( anim, (typeof duration == 'number' || typeof duration == 'string') ? duration : 500, 'easeOutExpo' ); if (!this.settings.overlayPage) { jQuery('body').animate( isRight ? {marginRight: '+=' + this.width} : {marginLeft: '+=' + this.width}, 500, 'easeOutExpo' ); } this.isOpen = true; jQuery('body').trigger(nsClass('opened'), this); return this; }, /** * Slides that sidebar out of view */ close: function (duration, callback) { if (!this.isOpen) { return this; } var isRight = (this.position == 'right'); var anim = isRight ? 
{marginRight: -this.width} : {marginLeft: -this.width}; this.toggleHandleIcon(false); this.container.animate( anim, (typeof duration == 'number' || typeof duration == 'string') ? duration : 500, 'easeOutExpo' ); if (!this.settings.overlayPage) { jQuery('body').animate( isRight ? {marginRight: '-=' + this.width} : {marginLeft: '-=' + this.width}, 500, 'easeOutExpo' ); } this.isOpen = false; return this; }, /** * Activates the given panel and passes to it the given element as the * the effective that we want it to think activated it. * * @param {Object|String} panel - Panel instance or the id of a panel * object * @param {jQuery} element - Element to pass to the panel as effective * element (the element that activated it) */ activatePanel: function (panel, element) { if (typeof panel == 'string') { panel = this.getPanelById(panel); } if (panel){ panel.activate(element); } this.roundCorners(); return this; }, /** * Invokes the expand method for the given panel so that it expands its * height to display its contents * * @param {Object|String} panel - Panel instance or the id of a panel * object * @param {Funtion} callback */ expandPanel: function (panel, callback) { if (typeof panel == 'string') { panel = this.getPanelById(panel); } if (panel){ panel.expand(callback); } return this; }, /** * Collapses the panel contents by invoking the given panel's collapse * method. * * @param {Object|String} panel - Panel instance or the id of a panel * object * @param {Funtion} callback */ collapsePanel: function (panel, callback) { if (typeof panel == 'string') { panel = this.getPanelById(panel); } if (panel){ panel.collapse(callback); } return this; }, /** * Adds a panel to this sidebar instance. * We try and build as much of the panel DOM as we can before inserting * it into the DOM in order to reduce reflow. * * @param {Object} panel - either a panel instance or an associative * array containing settings for the construction * of a new panel. 
* @param {Boolean} deferRounding - (Optional) If true, the rounding-off * of the top most and bottom most panels * will not be automatically done. Set * this to true when adding a lot of panels * at once. * @return {Object} - The newly created panel. */ addPanel: function (panel, deferRounding) { if (!(panel instanceof Panel)) { if (!panel.width) { panel.width = this.width; } panel.sidebar = this; panel = new Panel(panel); } this.panels[panel.id] = panel; this.container.find(nsSel('panels')).append(panel.element); if (deferRounding !== true) { this.roundCorners(); } this.checkActivePanels(Selection.getRangeObject()); return panel; } }); // ------------------------------------------------------------------------ // Panel constructor // ------------------------------------------------------------------------ var Panel = function Panel (opts) { this.id = null; this.folds = {}; this.button = null; this.title = jQuery(renderTemplate(' \ <div class="{panel-title}"> \ <span class="{panel-title-arrow}"></span> \ <span class="{panel-title-text}">Untitled</span> \ </div> \ ')); this.content = jQuery(renderTemplate(' \ <div class="{panel-content}"> \ <div class="{panel-content-inner}"> \ <div class="{panel-content-inner-text}">\ </div> \ </div> \ </div> \ ')); this.element = null; this.expanded = false; this.effectiveElement = null; this.isActive = true; this.init(opts); }; // ------------------------------------------------------------------------ // Panel prototype // ------------------------------------------------------------------------ jQuery.extend(Panel.prototype, { init: function (opts) { this.setTitle(opts.title) .setContent(opts.content); delete opts.title; delete opts.content; jQuery.extend(this, opts); if (!this.id) { this.id = nsClass(++uid); } var li = this.element = jQuery('<li id="' +this.id + '">') .append(this.title, this.content); if (this.expanded){ this.content.height('auto'); } this.toggleTitleIcon(this.expanded); this.coerceActiveOn(); // Disable text 
selection on title element this.title .attr('unselectable', 'on') .css('-moz-user-select', 'none') .each(function() {this.onselectstart = function() {return false;};}); if (typeof this.onInit == 'function') { this.onInit.apply(this); } }, /** * @param {Boolean} isExpanded - Whether or not the panel is in an * expanded state */ toggleTitleIcon: function (isExpanded) { if (this.sidebar.settings.rotateIcons) { this.rotateTitleIcon(isExpanded ? 90 : 0); } else { var icon = this.title.find(nsSel('panel-title-arrow')); if (isExpanded) { icon.addClass(nsClass('panel-title-arrow-down')); } else { icon.removeClass(nsClass('panel-title-arrow-down')); } } }, /** * Normalizes the activeOn property into a predicate function */ coerceActiveOn: function () { if (typeof this.activeOn != 'function') { var activeOn = this.activeOn; this.activeOn = (function () { var typeofActiveOn = typeof activeOn, fn; if (typeofActiveOn == 'boolean') { fn = function () { return activeOn; }; } else if (typeofActiveOn == 'undefined') { fn = function () { return true; }; } else if (typeofActiveOn == 'string') { fn = function (el) { return el ? 
el.is(activeOn) : false; }; } else { fn = function () { return false; }; } return fn; })(); } }, /** * Activates (displays) this panel */ activate: function (effective) { this.isActive = true; this.content.parent('li').show().removeClass(nsClass('deactivated')); this.effectiveElement = effective; if (typeof this.onActivate == 'function') { this.onActivate.call(this, effective); } }, /** * Hides this panel */ deactivate: function () { this.isActive = false; this.content.parent('li').hide().addClass(nsClass('deactivated')); this.effectiveElement = null; }, toggle: function () { if (this.expanded) { this.collapse(); } else { this.expand(); } }, /** * Displays the panel's contents */ expand: function (callback) { var that = this; var el = this.content; var old_h = el.height(); var new_h = el.height('auto').height(); el.height(old_h).stop().animate( {height: new_h}, 500, 'easeOutExpo', function () { if (typeof callback == 'function') { callback.call(that); } } ); this.element.removeClass('collapsed'); this.toggleTitleIcon(true); this.expanded = true; return this; }, /** * Hides the panel's contents--leaving only it's header */ collapse: function (duration, callback) { var that = this; this.element.addClass('collapsed'); this.content.stop().animate( {height: 5}, 250, 'easeOutExpo', function () { if (typeof callback == 'function') { callback.call(that); } } ); this.toggleTitleIcon(false); this.expanded = false; return this; }, /** * May also be called by the Sidebar to update title of panel * * @param html - Markup string, DOM object, or jQuery object */ setTitle: function (html) { this.title.find(nsSel('panel-title-text')).html(html); return this; }, /** * May also be called by the Sidebar to update content of panel * * @param html - Markup string, DOM object, or jQuery object */ setContent: function (html) { // We do this so that empty panels don't appear collapsed if (!html || html == '') { html = '&nbsp;'; } 
this.content.find(nsSel('panel-content-inner-text')).html(html); return this; }, rotateTitleIcon: function (angle, duration) { var arr = this.title.find(nsSel('panel-title-arrow')); arr.animate({angle: angle}, { duration : (typeof duration == 'number') ? duration : 500, easing : 'easeOutExpo', step : function (val, fx) { arr.css({ '-o-transform' : 'rotate(' + val + 'deg)', '-webkit-transform' : 'rotate(' + val + 'deg)', '-moz-transform' : 'rotate(' + val + 'deg)', '-ms-transform' : 'rotate(' + val + 'deg)' // filter : 'progid:DXImageTransform.Microsoft.BasicImage(rotation=' + (angle / 90) + ')' }); } }); }, /** * Walks up the ancestors chain for the given effective element, and * renders subpanels using the specified renderer function. * * @param {jQuery} effective - The effective element, whose lineage we * want to render * @param {Function} renderer - (Optional) function that will render * each element in the parental lineage * of the effective element */ renderEffectiveParents: function (effective, renderer) { var el = effective.first(); var content = []; var path = []; var activeOn = this.activeOn; var l; var pathRev; while (el.length > 0 && !el.is('.aloha-editable')) { if (activeOn(el)) { path.push('<span>' + el[0].tagName.toLowerCase() + '</span>'); l = path.length; pathRev = []; while (l--) { pathRev.push(path[l]); } content.push(supplant( '<div class="aloha-sidebar-panel-parent">' + '<div class="aloha-sidebar-panel-parent-path">{path}</div>' + '<div class="aloha-sidebar-panel-parent-content aloha-sidebar-opened">{content}</div>' + '</div>', { path : pathRev.join(''), content : (typeof renderer == 'function') ? 
renderer(el) : '----' } )); } el = el.parent(); } this.setContent(content.join('')); jQuery('.aloha-sidebar-panel-parent-path').click(function () { var c = jQuery(this).parent().find('.aloha-sidebar-panel-parent-content'); if (c.hasClass('aloha-sidebar-opened')) { c.hide().removeClass('aloha-sidebar-opened'); } else { c.show().addClass('aloha-sidebar-opened'); } }); this.content.height('auto').find('.aloha-sidebar-panel-content-inner').height('auto'); } }); var left = new Sidebar({ position : 'left', width : 250 // TODO define in config }); var right = new Sidebar({ position : 'right', width : 250 // TODO define in config }); Aloha.Sidebar = { left : left, right : right }; return Aloha.Sidebar; });
PypiClean
/JaqalPaw-1.2.0a1.tar.gz/JaqalPaw-1.2.0a1/src/jaqalpaw/emulator/arbiters.py
import time import asyncio from jaqalpaw.bytecode.encoding_parameters import ( CLR_FRAME_LSB, APPLY_EOF_LSB, ANCILLA_COMPILER_TAG_BIT, ANCILLA_STATE_LSB, FWD_FRM_T0_LSB, INV_FRM_T0_LSB, FRMROT0INT, FRMROT1INT, ) from .byte_decoding import * def construct_fifos( num_spline_fifos=8, num_channels=8, spline_fifo_depth=4, gate_seq_fifo_depth=32, dma_depth=256, ): spline_fifos = [ [asyncio.Queue(maxsize=spline_fifo_depth) for _ in range(num_spline_fifos)] for _ in range(num_channels) ] gseq_fifos = [ asyncio.Queue(maxsize=gate_seq_fifo_depth) for _ in range(num_channels) ] dma_queue = asyncio.Queue(maxsize=dma_depth) return spline_fifos, gseq_fifos, dma_queue async def DMA_arbiter(name, queue, data_output_queues): """Send data to the correct channel (or gate sequencer input FIFO) based on metadata""" while True: raw_data = await queue.get() data = int.from_bytes(raw_data, byteorder="little", signed=False) channel = (data >> DMA_MUX_LSB) & 0b111 await data_output_queues[channel].put(raw_data) queue.task_done() async def gate_seq_arbiter(name, queue, data_output_queues): """Performs the same functions as the hardware GateSequencer IP cores. Input words are 256 bits, and are parsed and treated accordingly depending on the metadata tags in the raw data in order to program LUTs or run gate sequences etc... 
If the LUTs are not being programmed, the resulting output is sent to the spline engine FIFOs""" while True: raw_data = await queue.get() data = int.from_bytes(raw_data, byteorder="little", signed=False) mod_type = (data >> MODTYPE_LSB) & 0b111 prog_mode = (data >> PROG_MODE_LSB) & 0b111 if prog_mode == 0b111: await data_output_queues[mod_type].put(raw_data) elif prog_mode == 0b001: parse_GLUT_prog_data(data) elif prog_mode == 0b010: parse_SLUT_prog_data(data) elif prog_mode == 0b011: parse_PLUT_prog_data(raw_data) elif prog_mode == 0b100 or prog_mode == 0b101 or prog_mode == 0b110: if prog_mode == 0b101 or prog_mode == 0b110: # at the very least we'll need the streamed data to be augmented # by the tag bit. Additional cases can be applied by ORing the # "OR address", oraddr, (representing external hardware input) # via some (binary) state by adding the following line after # oraddr is initially set to 1 << ANCILLA_COMPILER_TAG_BIT # # oraddr |= state << ANCILLA_STATE_LSB # # to execute a different branch. But this is not yet worked into # the emulator in a way that supports a sequence of ancilla # measurement states. 
oraddr = 1 << ANCILLA_COMPILER_TAG_BIT else: oraddr = 0 for gs_data in parse_gate_seq_data(data, oraddr=oraddr): new_mod_type = ( int.from_bytes(gs_data, byteorder="little", signed=False) >> MODTYPE_LSB ) & 0b111 await data_output_queues[new_mod_type].put(gs_data) queue.task_done() async def spline_engine( name, queue, time_list, data_list, waittrig_list, enablemask_list, fwd_frame0_mask_list, inv_frame0_mask_list, fwd_frame1_mask_list, inv_frame1_mask_list, ): """Converts the spline coefficients to a format that can be passed into a SplineEngine emulator, which generates the corresponding output and stores the data in time_list and data_list for plotting and/or inspecting the data""" eof_data = 0 while True: raw_data = await queue.get() data = int.from_bytes(raw_data, byteorder="little", signed=False) waittrig = (data >> WAIT_TRIG_LSB) & 0b1 enablemask = (data >> OUTPUT_EN_LSB) & 0b1 fwd_frame0_mask = 0 inv_frame0_mask = 0 fwd_frame1_mask = 0 inv_frame1_mask = 0 mod_type = (data >> MODTYPE_LSB) & 0b111 if mod_type == FRMROT0INT: fwd_frame0_mask = (data >> FWD_FRM_T0_LSB) & 0b11 inv_frame0_mask = (data >> INV_FRM_T0_LSB) & 0b11 elif mod_type == FRMROT1INT: fwd_frame1_mask = (data >> FWD_FRM_T0_LSB) & 0b11 inv_frame1_mask = (data >> INV_FRM_T0_LSB) & 0b11 shift = (data >> SPLSHIFT_LSB) & 0b11111 channel = (data >> DMA_MUX_LSB) & 0b111 reset_accum = (data >> CLR_FRAME_LSB) & 0b1 apply_at_eof = (data >> APPLY_EOF_LSB) & 0b1 dur, U0, U1, U2, U3 = parse_bypass_data(raw_data) # Convert binary values to real-unit equivalents for monitoring # dur += TIMECORR+0#+4 dur_real = convert_time_from_clock_cycles(dur) U0_real = mod_type_dict[mod_type]["realConvFunc"](U0) U1_real = mod_type_dict[mod_type]["realConvFunc"](U1) U2_real = mod_type_dict[mod_type]["realConvFunc"](U2) U3_real = mod_type_dict[mod_type]["realConvFunc"](U3) # This if statement is not absolutely necessary but reduces number of points to plot, forcing # the function to always jump to the else clause will 
produce the same output and is more # consistent with how the hardware is actually operating. if U1 == 0 and U2 == 0 and U3 == 0: time_list.append(time_list[-1] + dur) if mod_type in ( FRMROT0INT, FRMROT1INT, ): # then we have a z rotation which must accumulate from old values if reset_accum: last_val = 0 eof_data = 0 else: last_val = data_list[-1] if apply_at_eof: data_list.append(last_val + eof_data) eof_data = U0_real else: data_list.append(last_val + U0_real + eof_data) eof_data = 0 if mod_type == FRMROT0INT: fwd_frame0_mask_list.append(fwd_frame0_mask) inv_frame0_mask_list.append(inv_frame0_mask) else: fwd_frame1_mask_list.append(fwd_frame1_mask) inv_frame1_mask_list.append(inv_frame1_mask) else: data_list.append(U0_real) waittrig_list[-1] = waittrig waittrig_list.append(0) enablemask_list[-1] = enablemask enablemask_list.append(0) print( f"channel: {channel}, type: {mod_type}, Duration: {dur_real} s, U0: {U0_real}, U1: {U1_real}, U2: {U2_real}, U3: {U3_real}" ) else: # Bit shifting is done to enhance precision within firmware U1_shift = U1 U2_shift = U2 U3_shift = U3 # Calculate the same for real values for monitoring purposes only U1_rshift = U1_real / (1 << shift) U2_rshift = U2_real / (1 << (shift * 2)) U3_rshift = U3_real / (1 << (shift * 3)) # Pack the coefficients in a format that can be handled by the spline engine emulator coeffs = np.zeros((4, 1)) coeffs[0, 0] = U3_shift coeffs[1, 0] = U2_shift coeffs[2, 0] = U1_shift coeffs[3, 0] = U0 # The additional 3 clock cycles are related to a subtle hardware issue xdata = np.array(list(range(dur))) + 1 spline_data = pdq_spline(coeffs, [0], nsteps=dur, shift=shift) spline_data_real = list( map(mod_type_dict[mod_type]["realConvFunc"], spline_data) ) xdata_real = list(map(lambda x: time_list[-1] + x, xdata)) time_list.extend(xdata_real[:]) last_val = data_list[-1] if mod_type in ( FRMROT0INT, FRMROT1INT, ): # then we have a z rotation which must accumulate from old values if reset_accum: last_val = 0 eof_data = 
0 data_list.extend(last_val + eof_data + np.array(spline_data_real)) eof_data = 0 if mod_type == FRMROT0INT: fwd_frame0_mask_list.extend([fwd_frame0_mask] * len(xdata_real)) inv_frame0_mask_list.extend([inv_frame0_mask] * len(xdata_real)) else: fwd_frame1_mask_list.extend([fwd_frame1_mask] * len(xdata_real)) inv_frame1_mask_list.extend([inv_frame1_mask] * len(xdata_real)) else: data_list.extend(spline_data_real) print( f"channel: {channel}, type: {mod_type}, Duration: {dur_real} s, " f"U0: {U0_real}, U1: {U1_rshift}, U2: {U2_rshift}, U3: {U3_rshift}" ) # For good measure, wait for the duration encoded in the raw data, for more accurate emulation, this duration # should be scaled so as to reduce the influence imposed by computational delay # await asyncio.sleep(dur_real) queue.task_done()
PypiClean
/Color_Match-0.0.1-py3-none-any.whl/Color_Match/color_match_tmp.py
import numpy as np class Color_Match: def __init__(self): self.hc_kB = 0.0143877735 data = np.loadtxt('lin2012xyz10_fine_7sf.csv', delimiter=',') self.wavelengths = data[:,0] * 10**-9 self.s1 = data[:,1] self.s2 = data[:,2] self.s3 = data[:,3] def planck(self, l, t): """Returns the relative intensity of a Planck's Law distribution at a given wavelength (l -- in meters) and temperature (t -- in Kelvin).""" return 1. / l**5.0 * 1. / (np.exp(self.hc_kB / (l * t)) - 1.) def planck_spectrum(self, t): """Returns the relative intensities of the full Planck spectrum for a light source at a given temperature (t -- in Kelvin), across the entire range of human-visible frequencies, as defined in Color_Match.wavelengths .""" return np.array([planck(l, t) for l in self.wavelengths]) def sense_vector(spectrum): """Returns the expected sensory-perception vector corresponding to the normalized (and relative) amounts of signal received on the L(ong), M(edium), and S(hort) wavelength color receptors according to the 10-deg XYZ CMFs transformed from the CIE (2006) 2-deg LMS cone fundamentals with a 0.1nm spacing from: http://cvri.ioo.ucl.ac.uk/cmfs.htm Return value format is as a numpy Matrix, 3x1 (aka - 3-row column vector), to facilitate use of this output with other package functions. Note that length(spectrum) must equal length(Color_Match.wavelengths) for this to work properly. 
Automatic tests and error-catching coming soon!""" s = np.matrix([np.sum(self.s1 * spectrum), np.sum(self.s2 * spectrum), np.sum(self.s3 * spectrum)]).T return s / np.max(s) def s_lookup(self, num, l): """num = 1, 2, 3 (corresponds to L(ong), M(edium), S(hort) wavelength receptors) l (wavelength) should be in units of meters, and lie between np.min(Color_Match.wavelengths) and np.max(Color_Match.wavelengths)""" if num == 1: s = self.s1 elif num == 2: s = self.s2 elif num == 3: s = s3 return s[np.argmin((self.wavelengths - l)**2.0)] def rgb_composition(l1, l2, l3, sv): """Finds relative intensities required for light sources at l1, l2, and l3 in wavelength (meters) space to match the sense vector (sv) provided in the arguments. sv should be a 3x1 np.Matrix (aka - 3-row column vector). For convenience, this is the same as the output of the Color_Match.sense_vector function. l1, l2, l3 should have units of meters, and are most commonly the primary emission wavelengths of your RGB channels in an LED light source.""" s_mat = np.matrix([[self.s_lookup(1, l1), self.s_lookup(1, l2), self.s_lookup(1, l3)], [self.s_lookup(2, l1), self.s_lookup(2, l2), self.s_lookup(2, l3)], [self.s_lookup(3, l1), self.s_lookup(3, l2), self.s_lookup(3, l3)]]) c_mat = s_mat.I * sv return c_mat / np.max(c_mat)
PypiClean
/InvestOpenDataTools-1.0.2.tar.gz/InvestOpenDataTools-1.0.2/opendatatools/stock/stock_interface.py
import datetime import pandas as pd from .stock_agent import SHExAgent, SZExAgent, CSIAgent, XueqiuAgent, SinaAgent, CNInfoAgent, EastMoneyAgent from opendatatools.common import get_current_day shex_agent = SHExAgent() szex_agent = SZExAgent() csi_agent = CSIAgent() xq_agent = XueqiuAgent() sina_agent = SinaAgent() cninfo_agent = CNInfoAgent() eastmoney_agent = EastMoneyAgent() xq_count_map = { '1m': 240, '5m': 48, '15m': 16, '30m': 8, '60m': 4, 'day': 1, } bar_span_map = { '1m': 1, '5m': 5, '15m': 15, '30m': 30, '60m': 60, 'day': 1440, } def make_index(period, trade_date): bar_index = list() span = bar_span_map[period] dt = datetime.datetime.strptime(trade_date, '%Y-%m-%d') bar_index.extend( pd.DatetimeIndex(start="%s 09:30:00" % trade_date, end="%s 11:30:00" % trade_date, freq='%sT' % span)[1:]) bar_index.extend( pd.DatetimeIndex(start="%s 13:00:00" % trade_date, end="%s 15:00:00" % trade_date, freq='%sT' % span)[1:]) return bar_index def set_proxies(proxies): shex_agent.set_proxies(proxies) szex_agent.set_proxies(proxies) csi_agent.set_proxies(proxies) xq_agent.set_proxies(proxies) def get_index_list(market='SH'): if market == 'SH': return shex_agent.get_index_list() if market == 'SZ': return szex_agent.get_index_list() if market == 'CSI': return csi_agent.get_index_list() def get_index_component(symbol): temp = symbol.split(".") if len(temp) == 2: market = temp[1] index = temp[0] if market == 'SH': return shex_agent.get_index_component(index) elif market == 'SZ': return szex_agent.get_index_component(index) elif market == 'CSI': return csi_agent.get_index_component(index) else: return None else: return None def get_rzrq_info(market='SH', date=None): if date is None: date = get_current_day(format='%Y-%m-%d') if market == 'SH': return shex_agent.get_rzrq_info(date) if market == 'SZ': return szex_agent.get_rzrq_info(date) return None, None def get_pledge_info(market='SH', date=None): if date is None: date = get_current_day(format='%Y-%m-%d') if market == 'SH': 
return shex_agent.get_pledge_info(date) if market == 'SZ': return szex_agent.get_pledge_info(date) return None, None def get_dividend(symbol): temp = symbol.split(".") if len(temp) == 2: market = temp[1] code = temp[0] if market == 'SH': return shex_agent.get_dividend(code) if market == 'SZ': return cninfo_agent.get_dividend(code) def get_quote(symbols): return xq_agent.get_quote(symbols) def fill_df(df, period, trade_date, symbol): df.index = df['time'] index = make_index(period, trade_date) df_new = pd.DataFrame(index=index, columns=['last']) df_new['last'] = df['last'] df_new.fillna(method='ffill', inplace=True) df_new['high'] = df['high'] df_new['low'] = df['low'] df_new['open'] = df['open'] df_new.fillna(method='ffill', axis=1, inplace=True) df_new['change'] = df['change'] df_new['percent'] = df['percent'] df_new['symbol'] = symbol df_new['turnover_rate'] = df['turnover_rate'] df_new['volume'] = df['volume'] df_new['time'] = df_new.index df_new.fillna(0, inplace=True) return df_new # period 1m, 5m, 15m, 30m, 60m, day def get_kline(symbol, start_date, end_date, period='day'): start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d') end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d') + datetime.timedelta(days=1) timestamp = end_date.timestamp() cnt = (end_date - start_date).days * -1 * xq_count_map[period] timestamp = int(timestamp * 1000) df, msg = xq_agent.get_kline(symbol, timestamp, period, cnt) if len(df) == 0: return df, msg df = df[(df.time < end_date) & (df.time >= start_date)] return df, '' def get_kline_multisymbol(symbols, trade_date, period): symbol_list = symbols.split(',') timestamp = datetime.datetime.strptime(trade_date, '%Y-%m-%d').timestamp() timestamp = int(timestamp * 1000) df, msg = xq_agent.get_kline_multisymbol(symbol_list, timestamp, period, xq_count_map[period]) next_date = datetime.datetime.strptime(trade_date, '%Y-%m-%d') + datetime.timedelta(days=1) if df is None: return df, msg df = df[df.time < next_date] gp = 
df.groupby('symbol') df_list = list() for symbol, df_item in gp: if len(df_item) < xq_count_map[period]: df_list.append(fill_df(df_item, period, trade_date, symbol)) else: df_list(df_item) return pd.concat(df_list), '' def get_timestamp_list(start_date, end_date): timestamp_list = [] curr_date = start_date while curr_date <= end_date: curr_datetime = datetime.datetime.strptime(curr_date, '%Y-%m-%d') timestamp = curr_datetime.timestamp() timestamp_list.append(int(timestamp * 1000)) next_time = curr_datetime + datetime.timedelta(days=1) curr_date = datetime.datetime.strftime(next_time, '%Y-%m-%d') return timestamp_list def get_kline_multidate(symbol, start_date, end_date, period): timestamp_list = get_timestamp_list(start_date, end_date) return xq_agent.get_kline_multitimestamp(symbol, timestamp_list, period, xq_count_map[period]) def get_daily(symbol, start_date, end_date): curr_date = start_date df_result = [] while curr_date <= end_date: curr_datetime = datetime.datetime.strptime(curr_date, '%Y-%m-%d') next_time = curr_datetime + datetime.timedelta(days=100) next_date = datetime.datetime.strftime(next_time, '%Y-%m-%d') timestamp = curr_datetime.timestamp() df, msg = xq_agent.get_kline(symbol, int(timestamp * 1000), 'day', 100) if len(df) != 0: df_result.append(df[df['time'] < next_time]) curr_date = next_date if len(df_result) > 0: df = pd.concat(df_result) df = df[(df['time'] >= start_date) & (df['time'] <= end_date)] return df, '' else: return None, '没有获取到数据' def get_adj_factor(symbol): return sina_agent.get_adj_factor(symbol) def get_trade_detail(symbol, trade_date): return sina_agent.get_trade_detail(symbol, trade_date) def get_report_data(symbol='600000.SH', type='资产负债表'): dict_type = { '利润表': 'lrb', '资产负债表': 'fzb', '现金流量表': 'llb', } if type not in dict_type: return None, 'type输入错误,可以输入 %s' % dict_type.keys() data = symbol.split(sep='.') market = data[1].lower() code = data[0] return cninfo_agent.get_report_data(market, code, dict_type[type]) def 
get_shareholder_structure(symbol='600000.SH'): data = symbol.split(sep='.') market = data[1].lower() code = data[0] return cninfo_agent.get_shareholder_structure(market, code) # 单位:百万元 def get_hist_money_flow(symbol): data = symbol.split(sep='.') market = data[1] if market == 'SH': marketnum = '1' else: marketnum = '2' code = data[0] + marketnum return eastmoney_agent.get_hist_money_flow(code) # 单位:万元 def get_realtime_money_flow(symbol): data = symbol.split(sep='.') market = data[1] if market == 'SH': marketnum = '1' else: marketnum = '2' code = data[0] + marketnum return eastmoney_agent.get_realtime_money_flow(code) # 单位:亿元 def get_realtime_money_flow_market(): return eastmoney_agent.get_realtime_money_flow_market() def get_hist_money_flow_market(): return eastmoney_agent.get_hist_money_flow_market() def get_allstock_flow(): return eastmoney_agent.get_allstock_flow()
PypiClean
/Andencento-0.24.tar.gz/Andencento-0.24/userbot/helpers/fasttelethon.py
import asyncio import hashlib import inspect import logging import math import os from collections import defaultdict from typing import (AsyncGenerator, Awaitable, BinaryIO, DefaultDict, List, Optional, Tuple, Union) from telethon import TelegramClient, helpers, utils from telethon.crypto import AuthKey from telethon.errors import FloodWaitError from telethon.network import MTProtoSender from telethon.tl.alltlobjects import LAYER from telethon.tl.functions import InvokeWithLayerRequest from telethon.tl.functions.auth import (ExportAuthorizationRequest, ImportAuthorizationRequest) from telethon.tl.functions.upload import (GetFileRequest, SaveBigFilePartRequest, SaveFilePartRequest) from telethon.tl.types import (Document, InputDocumentFileLocation, InputFile, InputFileBig, InputFileLocation, InputPeerPhotoFileLocation, InputPhotoFileLocation, TypeInputFile) try: from mautrix.crypto.attachments import async_encrypt_attachment except ImportError: async_encrypt_attachment = None log: logging.Logger = logging.getLogger("fasttelethon") TypeLocation = Union[ Document, InputDocumentFileLocation, InputPeerPhotoFileLocation, InputFileLocation, InputPhotoFileLocation, ] class DownloadSender: client: TelegramClient sender: MTProtoSender request: GetFileRequest remaining: int stride: int def __init__( self, client: TelegramClient, sender: MTProtoSender, file: TypeLocation, offset: int, limit: int, stride: int, count: int, ) -> None: self.sender = sender self.client = client self.request = GetFileRequest(file, offset=offset, limit=limit) self.stride = stride self.remaining = count async def next(self) -> Optional[bytes]: if not self.remaining: return None while True: try: result = await self.client._call(self.sender, self.request) except FloodWaitError as e: await asyncio.sleep(e.seconds) else: break self.remaining -= 1 self.request.offset += self.stride return result.bytes def disconnect(self) -> Awaitable[None]: return self.sender.disconnect() class UploadSender: client: 
TelegramClient sender: MTProtoSender request: Union[SaveFilePartRequest, SaveBigFilePartRequest] part_count: int stride: int previous: Optional[asyncio.Task] loop: asyncio.AbstractEventLoop def __init__( self, client: TelegramClient, sender: MTProtoSender, file_id: int, part_count: int, big: bool, index: int, stride: int, loop: asyncio.AbstractEventLoop, ) -> None: self.client = client self.sender = sender self.part_count = part_count if big: self.request = SaveBigFilePartRequest(file_id, index, part_count, b"") else: self.request = SaveFilePartRequest(file_id, index, b"") self.stride = stride self.previous = None self.loop = loop async def next(self, data: bytes) -> None: if self.previous: await self.previous self.previous = self.loop.create_task(self._next(data)) async def _next(self, data: bytes) -> None: self.request.bytes = data log.debug( f"Sending file part {self.request.file_part}/{self.part_count}" f" with {len(data)} bytes" ) await self.client._call(self.sender, self.request) self.request.file_part += self.stride async def disconnect(self) -> None: if self.previous: await self.previous return await self.sender.disconnect() class ParallelTransferrer: client: TelegramClient loop: asyncio.AbstractEventLoop dc_id: int senders: Optional[List[Union[DownloadSender, UploadSender]]] auth_key: AuthKey upload_ticker: int def __init__(self, client: TelegramClient, dc_id: Optional[int] = None) -> None: self.client = client self.loop = self.client.loop self.dc_id = dc_id or self.client.session.dc_id self.auth_key = ( None if dc_id and self.client.session.dc_id != dc_id else self.client.session.auth_key ) self.senders = None self.upload_ticker = 0 async def _cleanup(self) -> None: await asyncio.gather(*[sender.disconnect() for sender in self.senders]) self.senders = None @staticmethod def _get_connection_count( file_size: int, max_count: int = 20, full_size: int = 100 * 1024 * 1024 ) -> int: if file_size > full_size: return max_count return math.ceil((file_size / 
full_size) * max_count) async def _init_download( self, connections: int, file: TypeLocation, part_count: int, part_size: int ) -> None: minimum, remainder = divmod(part_count, connections) def get_part_count() -> int: nonlocal remainder if remainder > 0: remainder -= 1 return minimum + 1 return minimum # The first cross-DC sender will export+import the authorization, so we always create it # before creating any other senders. self.senders = [ await self._create_download_sender( file, 0, part_size, connections * part_size, get_part_count() ), *await asyncio.gather( *[ self._create_download_sender( file, i, part_size, connections * part_size, get_part_count() ) for i in range(1, connections) ] ), ] async def _create_download_sender( self, file: TypeLocation, index: int, part_size: int, stride: int, part_count: int, ) -> DownloadSender: return DownloadSender( self.client, await self._create_sender(), file, index * part_size, part_size, stride, part_count, ) async def _init_upload( self, connections: int, file_id: int, part_count: int, big: bool ) -> None: self.senders = [ await self._create_upload_sender(file_id, part_count, big, 0, connections), *await asyncio.gather( *[ self._create_upload_sender(file_id, part_count, big, i, connections) for i in range(1, connections) ] ), ] async def _create_upload_sender( self, file_id: int, part_count: int, big: bool, index: int, stride: int ) -> UploadSender: return UploadSender( self.client, await self._create_sender(), file_id, part_count, big, index, stride, loop=self.loop, ) async def _create_sender(self) -> MTProtoSender: dc = await self.client._get_dc(self.dc_id) sender = MTProtoSender(self.auth_key, loggers=self.client._log) await sender.connect( self.client._connection( dc.ip_address, dc.port, dc.id, loggers=self.client._log, proxy=self.client._proxy, ) ) if not self.auth_key: log.debug(f"Exporting auth to DC {self.dc_id}") auth = await self.client(ExportAuthorizationRequest(self.dc_id)) self.client._init_request.query 
= ImportAuthorizationRequest( id=auth.id, bytes=auth.bytes ) req = InvokeWithLayerRequest(LAYER, self.client._init_request) await sender.send(req) self.auth_key = sender.auth_key return sender async def init_upload( self, file_id: int, file_size: int, part_size_kb: Optional[float] = None, connection_count: Optional[int] = None, ) -> Tuple[int, int, bool]: connection_count = connection_count or self._get_connection_count(file_size) part_size = (part_size_kb or utils.get_appropriated_part_size(file_size)) * 1024 part_count = (file_size + part_size - 1) // part_size is_large = file_size > 10 * 1024 * 1024 await self._init_upload(connection_count, file_id, part_count, is_large) return part_size, part_count, is_large async def upload(self, part: bytes) -> None: await self.senders[self.upload_ticker].next(part) self.upload_ticker = (self.upload_ticker + 1) % len(self.senders) async def finish_upload(self) -> None: await self._cleanup() async def download( self, file: TypeLocation, file_size: int, part_size_kb: Optional[float] = None, connection_count: Optional[int] = None, ) -> AsyncGenerator[bytes, None]: connection_count = connection_count or self._get_connection_count(file_size) part_size = (part_size_kb or utils.get_appropriated_part_size(file_size)) * 1024 part_count = math.ceil(file_size / part_size) log.debug( "Starting parallel download: " f"{connection_count} {part_size} {part_count} {file!s}" ) await self._init_download(connection_count, file, part_count, part_size) part = 0 while part < part_count: tasks = [self.loop.create_task(sender.next()) for sender in self.senders] for task in tasks: data = await task if not data: break yield data part += 1 log.debug(f"Part {part} downloaded") log.debug("Parallel download finished, cleaning up connections") await self._cleanup() parallel_transfer_locks: DefaultDict[int, asyncio.Lock] = defaultdict( lambda: asyncio.Lock() ) def stream_file(file_to_stream: BinaryIO, chunk_size=1024): while True: data_read = 
file_to_stream.read(chunk_size) if not data_read: break yield data_read async def _internal_transfer_to_telegram( client: TelegramClient, response: BinaryIO, progress_callback: callable ) -> Tuple[TypeInputFile, int]: file_id = helpers.generate_random_long() file_size = os.path.getsize(response.name) hash_md5 = hashlib.md5() uploader = ParallelTransferrer(client) part_size, part_count, is_large = await uploader.init_upload(file_id, file_size) buffer = bytearray() for data in stream_file(response): if progress_callback: r = progress_callback(response.tell(), file_size) if inspect.isawaitable(r): await r if not is_large: hash_md5.update(data) if len(buffer) == 0 and len(data) == part_size: await uploader.upload(data) continue new_len = len(buffer) + len(data) if new_len >= part_size: cutoff = part_size - len(buffer) buffer.extend(data[:cutoff]) await uploader.upload(bytes(buffer)) buffer.clear() buffer.extend(data[cutoff:]) else: buffer.extend(data) if len(buffer) > 0: await uploader.upload(bytes(buffer)) await uploader.finish_upload() if is_large: return InputFileBig(file_id, part_count, "upload"), file_size else: return InputFile(file_id, part_count, "upload", hash_md5.hexdigest()), file_size async def download_file( client: TelegramClient, location: TypeLocation, out: BinaryIO, progress_callback: callable = None, ) -> BinaryIO: size = location.size dc_id, location = utils.get_input_location(location) # We lock the transfers because telegram has connection count limits downloader = ParallelTransferrer(client, dc_id) downloaded = downloader.download(location, size) async for x in downloaded: out.write(x) if progress_callback: r = progress_callback(out.tell(), size) if inspect.isawaitable(r): await r return out async def upload_file( client: TelegramClient, file: BinaryIO, progress_callback: callable = None, ) -> TypeInputFile: return (await _internal_transfer_to_telegram(client, file, progress_callback))[0]
PypiClean
/Eskapade_Core-1.0.0-py3-none-any.whl/escore/core/definitions.py
"""Core run-time definitions: status codes, random seeds and configuration options.

Reconstructed formatting: the original text had all newlines collapsed into
spaces and was not importable as shown.  Code is otherwise unchanged except
where noted (redundant lookup removed in ``set_opt_var``).
"""

import ast
import collections
import os

from enum import IntEnum, unique

from pkg_resources import resource_filename

from escore.logger import LogLevel


@unique
class StatusCode(IntEnum):
    """Return status code enumeration class.

    A StatusCode should be returned by the initialize, execute, and finalize
    methods of links, chains, and the process manager.

    The enumerations are:

    * Undefined (-1): Default status.
    * Success (0 == EX_OK / EXIT_SUCCESS): All OK, i.e. there were no errors.
    * RepeatChain (1): Repeat execution of this chain.
    * SkipChain (2): Skip this chain: initialize, execute, and finalize.
    * BreakChain (3): Skip the further execution of this chain, but do perform finalize.
    * Recoverable (4): Not OK, but can continue, i.e. there was an error,
      but the application can recover from it.
    * Failure (5): An error occurred and the application cannot recover from it.
      In this case the application should just quit.
    """

    Undefined = -1  # type: int
    Success = 0  # type: int
    RepeatChain = 1  # type: int
    SkipChain = 2  # type: int
    BreakChain = 3  # type: int
    Recoverable = 4  # type: int
    Failure = 5  # type: int

    def __str__(self) -> str:
        """Get string representation of :class:`StatusCode`.

        :return: String representation of :class:`StatusCode`.
        :rtype: str
        """
        return self.name

    def is_undefined(self) -> bool:
        """Check if status is `StatusCode.Undefined`.

        :return: True when `StatusCode.Undefined`, False otherwise.
        :rtype: bool
        """
        return StatusCode.Undefined == self

    def is_success(self) -> bool:
        """Check if status is `StatusCode.Success`.

        :return: True when `StatusCode.Success`, False otherwise.
        :rtype: bool
        """
        return StatusCode.Success == self

    def is_repeat_chain(self) -> bool:
        """Check if status is `StatusCode.RepeatChain`.

        :return: True when `StatusCode.RepeatChain`, False otherwise.
        :rtype: bool
        """
        return StatusCode.RepeatChain == self

    def is_skip_chain(self) -> bool:
        """Check if status is `StatusCode.SkipChain`.

        :return: True when `StatusCode.SkipChain`, False otherwise.
        :rtype: bool
        """
        return StatusCode.SkipChain == self

    def is_break_chain(self) -> bool:
        """Check if status is `StatusCode.BreakChain`.

        :return: True when `StatusCode.BreakChain`, False otherwise.
        :rtype: bool
        """
        return StatusCode.BreakChain == self

    def is_recoverable(self) -> bool:
        """Check if status is `StatusCode.Recoverable`.

        :return: True when `StatusCode.Recoverable`, False otherwise.
        :rtype: bool
        """
        return StatusCode.Recoverable == self

    def is_failure(self) -> bool:
        """Check if status is `StatusCode.Failure`.

        :return: True when `StatusCode.Failure`, False otherwise.
        :rtype: bool
        """
        return StatusCode.Failure == self


class RandomSeeds:
    """Container for seeds of random generators.

    Seeds are stored as key-value pairs and are accessed with getitem and
    setitem methods.  A default seed can be accessed with the key "default".
    The default seed is also returned if no seed is set for the specified key.

    >>> import numpy as np
    >>> seeds = RandomSeeds(default=999, foo=42, bar=13)
    >>> seeds['NumPy'] = 100
    >>> np.random.seed(seeds['NumPy'])
    >>> print(seeds['nosuchseed'])
    999
    """

    def __init__(self, **kwargs):
        """Initialize an instance.

        Values of the specified keyword arguments must be integers, which are
        set as seed values for the corresponding key.
        """
        # initialize attributes
        self._seeds = {}       # per-key seeds, keyed by lowercase stripped string
        self._default = 1      # seed returned for unknown keys; overridden by key 'default'

        # set specified seeds
        for key, seed in kwargs.items():
            self[key] = seed

    def __getitem__(self, key):
        """Return seed for specified lowercase-string key."""
        return self._seeds.get(str(key).strip().lower(), self._default)

    def __setitem__(self, key, seed):
        """Set integer seed for specified lowercase-string key."""
        # parse key and seed
        key = str(key).strip().lower()
        try:
            seed = int(seed)
        except Exception:
            raise TypeError('specified seed for key "{0:s}" is not an integer: "{1!s}"'.format(key, seed))

        # check if this is the default key
        if key == 'default':
            self._default = seed
        else:
            self._seeds[key] = seed

    def __str__(self):
        seed_str = ', '.join('{0:s}: {1:d}'.format(*kv) for kv in self._seeds.items())
        return '{{default: {0:d} | {1:s}}}'.format(self._default, seed_str)


# configuration variables, grouped by category
CONFIG_VARS = collections.OrderedDict()
CONFIG_VARS['run'] = ['analysisName', 'version', 'macro', 'batchMode', 'interactive', 'logLevel',
                      'logFormat', 'doCodeProfiling', ]
CONFIG_VARS['chains'] = ['beginWithChain', 'endWithChain', 'storeResultsEachChain',
                         'storeResultsOneChain', 'doNotStoreResults', ]
CONFIG_VARS['file_io'] = ['esRoot', 'resultsDir', 'dataDir', 'macrosDir', 'templatesDir', 'configDir', ]
CONFIG_VARS['config'] = ['sparkCfgFile', ]
CONFIG_VARS['db_io'] = ['all_mongo_collections', ]
CONFIG_VARS['rand_gen'] = ['seeds', ]

# expected Python type per configuration variable (default is str; see set_opt_var)
CONFIG_TYPES = dict(version=int,
                    batchMode=bool,
                    interactive=bool,
                    storeResultsEachChain=bool,
                    doNotStoreResults=bool,
                    all_mongo_collections=list,
                    )

CONFIG_DEFAULTS = dict(analysisName='MyAnalysis',
                       version=0,
                       batchMode=True,
                       interactive=False,
                       logLevel=LogLevel.INFO,
                       logFormat='%(asctime)s %(levelname)s [%(module)s]: %(message)s',
                       doCodeProfiling=None,
                       storeResultsEachChain=False,
                       doNotStoreResults=False,
                       esRoot=os.getcwd() + '/',
                       resultsDir=os.getcwd() + '/results/',
                       dataDir=os.getcwd() + '/data/',
                       macrosDir=os.getcwd() + '/macros/',
                       templatesDir=resource_filename('escore', 'templates') + '/',
                       configDir=os.getcwd() + '/config/',
                       sparkCfgFile='spark.cfg',
                       seeds=RandomSeeds(),
                       )

# user options in command-line arguments
USER_OPTS = collections.OrderedDict()
USER_OPTS['run'] = ['analysis_name', 'analysis_version', 'batch_mode', 'interactive', 'log_level',
                    'log_format', 'unpickle_config', 'profile', 'conf_var', ]
USER_OPTS['chains'] = ['begin_with', 'end_with', 'single_chain', 'store_all', 'store_one', 'store_none', ]
USER_OPTS['file_io'] = ['results_dir', 'data_dir', 'macros_dir', 'templates_dir', ]
USER_OPTS['config'] = ['spark_cfg_file', ]
USER_OPTS['rand_gen'] = ['seed', ]

# single-letter short flags for selected command-line options
USER_OPTS_SHORT = dict(analysis_name='n',
                       analysis_version='v',
                       interactive='i',
                       log_level='L',
                       conf_var='c',
                       begin_with='b',
                       end_with='e',
                       single_chain='s',
                       )

# argparse keyword arguments per command-line option
USER_OPTS_KWARGS = dict(analysis_name=dict(help='set name of analysis in run', metavar='NAME'),
                        analysis_version=dict(help='set version of analysis version in run', type=int,
                                              metavar='VERSION'),
                        batch_mode=dict(help='run in batch mode (no X Windows)', action='store_true'),
                        interactive=dict(help='start Python shell after run', action='store_true'),
                        log_level=dict(help='set logging level',
                                       choices=['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'FATAL'],
                                       metavar='{NOTSET,DEBUG,INFO,WARNING,ERROR,FATAL}'),
                        log_format=dict(help='set log-message format', metavar='FORMAT'),
                        unpickle_config=dict(help='interpret first CONFIG_FILE as path to pickled settings',
                                             action='store_true'),
                        profile=dict(help='run Python profiler, sort output by specified column',
                                     choices=['stdname', 'nfl', 'pcalls', 'file', 'calls', 'time', 'line',
                                              'cumulative', 'module', 'name'],
                                     metavar='{stdname,nfl,pcalls,file,calls,time,line,cumulative,module,name}'),
                        conf_var=dict(help='set configuration variable', action='append', metavar='KEY=VALUE'),
                        begin_with=dict(help='begin execution with chain CHAIN_NAME', metavar='CHAIN_NAME'),
                        end_with=dict(help='end execution with chain CHAIN_NAME', metavar='CHAIN_NAME'),
                        single_chain=dict(help='only execute chain CHAIN_NAME', metavar='CHAIN_NAME'),
                        store_all=dict(help='store run-process services after every chain',
                                       action='store_true'),
                        store_one=dict(help='store run-process services after chain CHAIN_NAME',
                                       metavar='CHAIN_NAME'),
                        store_none=dict(help='do not store run-process services', action='store_true'),
                        results_dir=dict(help='set directory path for results output', metavar='RESULTS_DIR'),
                        data_dir=dict(help='set directory path for data', metavar='DATA_DIR'),
                        macros_dir=dict(help='set directory path for macros', metavar='MACROS_DIR'),
                        templates_dir=dict(help='set directory path for template files',
                                           metavar='TEMPLATES_DIR'),
                        spark_cfg_file=dict(help='set path of Spark configuration file',
                                            metavar='SPARK_CONFIG_FILE'),
                        seed=dict(help='set seed for random-number generation', action='append',
                                  metavar='KEY=SEED'),
                        )

# mapping from command-line option name to configuration-variable name
USER_OPTS_CONF_KEYS = dict(analysis_name='analysisName',
                           analysis_version='version',
                           batch_mode='batchMode',
                           log_level='logLevel',
                           log_format='logFormat',
                           profile='doCodeProfiling',
                           begin_with='beginWithChain',
                           end_with='endWithChain',
                           store_all='storeResultsEachChain',
                           store_one='storeResultsOneChain',
                           store_none='doNotStoreResults',
                           spark_cfg_file='sparkCfgFile',
                           seed='seeds',
                           )


def set_opt_var(opt_key, settings, args):
    """Set configuration variable from user options.

    :param str opt_key: name of the command-line option
    :param settings: settings mapping to update
    :param dict args: parsed command-line arguments
    """
    value = args.get(opt_key)
    if value is None:
        return
    conf_key = USER_OPTS_CONF_KEYS.get(opt_key, opt_key)
    value_type = CONFIG_TYPES.get(conf_key, str)
    val = value_type(value)  # single lookup; originally CONFIG_TYPES was queried twice
    if value_type != bool:
        settings[conf_key] = val
        return
    # default boolean user-opt arg is always False!
    if val:
        # user set it to true on cmd line, so adopt
        settings[conf_key] = val
    elif conf_key not in settings:
        # missing anyhow, so let's adopt
        settings[conf_key] = val
    else:
        # a default value is already present; ignoring this one
        pass


# per-option setter functions; unknown options fall back to set_opt_var
CONFIG_OPTS_SETTERS = collections.defaultdict(lambda: set_opt_var)


def set_log_level_opt(opt_key, settings, args):
    """Set configuration log level from user option.

    :raises ValueError: if the specified level is not a member of LogLevel
    """
    level = args.get(opt_key)
    if not level:
        return
    if level not in LogLevel.__members__:
        raise ValueError('invalid logging level specified: "{!s}"'.format(level))
    settings[USER_OPTS_CONF_KEYS.get(opt_key, opt_key)] = level


CONFIG_OPTS_SETTERS['log_level'] = set_log_level_opt


def set_begin_end_chain_opt(opt_key, settings, args):
    """Set begin/end-chain variable from user option.

    :raises RuntimeError: if combined with the "single-chain" option
    """
    chain = args.get(opt_key)
    if not chain:
        return
    if args.get('single_chain'):
        raise RuntimeError('"begin-with" and "end-with" chain options cannot be combined with "single-chain" option')
    settings[USER_OPTS_CONF_KEYS.get(opt_key, opt_key)] = str(chain)


CONFIG_OPTS_SETTERS['begin_with'] = set_begin_end_chain_opt
CONFIG_OPTS_SETTERS['end_with'] = set_begin_end_chain_opt


def set_single_chain_opt(opt_key, settings, args):
    """Set single-chain variable from user option.

    Sets both the begin-chain and end-chain variables to the same chain.
    """
    chain = args.get(opt_key)
    if not chain:
        return
    settings[USER_OPTS_CONF_KEYS['begin_with']] = str(chain)
    settings[USER_OPTS_CONF_KEYS['end_with']] = str(chain)


CONFIG_OPTS_SETTERS['single_chain'] = set_single_chain_opt


def set_seeds(opt_key, settings, args):
    """Set random seeds.

    Each argument is either "key=seed" or a bare seed value (applied to the
    'default' key).

    :raises RuntimeError: if an argument has an empty key or empty seed
    """
    seed_args = args.get(opt_key)
    if not seed_args:
        return
    seeds = settings[USER_OPTS_CONF_KEYS.get(opt_key, opt_key)]
    for kv in seed_args:
        kv = kv.strip()
        eq_pos = kv.find('=')
        if eq_pos == 0 or eq_pos == len(kv) - 1:
            raise RuntimeError('expected "key=seed" for --seed command-line argument; got "{}"'.format(kv))
        key, value = (kv[:eq_pos].strip().lower(), kv[eq_pos + 1:].strip()) if eq_pos > 0 else ('default', kv.strip())
        seeds[key] = value


CONFIG_OPTS_SETTERS['seed'] = set_seeds


def set_custom_user_vars(opt_key, settings, args):
    """Set custom user configuration variables.

    Each argument is "key=value"; the value is interpreted as a Python literal
    when possible, otherwise stored as a string.

    :raises RuntimeError: if an argument is not of the form "key=value"
    """
    custom_vars = args.get(opt_key)
    if not custom_vars:
        return
    for var in custom_vars:
        # parse key-value pair
        var = var.strip()
        eq_pos = var.find('=')
        if eq_pos < 1 or eq_pos > len(var) - 2:
            raise RuntimeError('Expected "key=value" for --conf-var command-line argument; got "{}"'.format(var))
        key, value = var[:eq_pos].strip(), var[eq_pos + 1:].strip()

        # interpret type of value
        try:
            settings[key] = ast.literal_eval(value)
        except Exception:
            settings[key] = value


CONFIG_OPTS_SETTERS['conf_var'] = set_custom_user_vars
PypiClean
/Explainer/dashboard_components/connectors.py
__all__ = [
    'CutoffPercentileComponent',
    'PosLabelConnector',
    'CutoffConnector',
    'IndexConnector',
    'HighlightConnector'
]

import numpy as np

import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html

from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate

from ..dashboard_methods import *


class CutoffPercentileComponent(ExplainerComponent):
    def __init__(self, explainer, title="Global cutoff", name=None,
                 hide_title=False, hide_cutoff=False, hide_percentile=False,
                 hide_selector=False, pos_label=None, cutoff=0.5,
                 percentile=None, description=None, **kwargs):
        """
        Slider to set a cutoff for Classifier components, based on setting the
        cutoff at a certain percentile of predictions, e.g.:
        percentile=0.8 means "mark the 20% highest scores as positive".

        This cutoff can then be connected with other components like e.g.
        RocAucComponent with a CutoffConnector.

        Args:
            explainer (Explainer): explainer object constructed with either
                        ClassifierExplainer() or RegressionExplainer()
            title (str, optional): Title of tab or page. Defaults to
                        "Global Cutoff".
            name (str, optional): unique name to add to Component elements.
                        If None then random uuid is generated to make sure
                        it's unique. Defaults to None.
            hide_title (bool, optional): Hide title.
            hide_cutoff (bool, optional): Hide the cutoff slider. Defaults to False.
            hide_percentile (bool, optional): Hide percentile slider. Defaults to False.
            hide_selector (bool, optional): hide pos label selectors. Defaults to False.
            pos_label ({int, str}, optional): initial pos label.
                        Defaults to explainer.pos_label
            cutoff (float, optional): Initial cutoff. Defaults to 0.5.
            percentile ([type], optional): Initial percentile. Defaults to None.
            description (str, optional): Tooltip to display when hover over
                        component title. When None default text is shown.
        """
        super().__init__(explainer, title, name)

        # id of the cutoff slider; other components connect to it via CutoffConnector
        self.cutoff_name = 'cutoffconnector-cutoff-'+self.name

        self.selector = PosLabelSelector(explainer, name=self.name, pos_label=pos_label)

        if self.description is None: self.description = """
        Select a model cutoff such that all predicted probabilities higher than
        the cutoff will be labeled positive, and all predicted probabilities
        lower than the cutoff will be labeled negative. You can also set
        the cutoff as a percentile of all observations. Setting the cutoff
        here will automatically set the cutoff in multiple other connected
        component.
        """
        self.register_dependencies(['preds', 'pred_percentiles'])

    def layout(self):
        """Return the dash layout: a card with cutoff and percentile sliders."""
        return dbc.Card([
            make_hideable(
                dbc.CardHeader([
                    html.H3(self.title, className="card-title",
                            id='cutoffconnector-title-'+self.name),
                    dbc.Tooltip(self.description,
                                target='cutoffconnector-title-'+self.name),
                ]), hide=self.hide_title),
            dbc.CardBody([
                dbc.Row([
                    dbc.Col([
                        dbc.Row([
                            make_hideable(
                                dbc.Col([
                                    html.Div([
                                        html.Label('Cutoff prediction probability:'),
                                        dcc.Slider(id='cutoffconnector-cutoff-'+self.name,
                                                   min=0.01, max=0.99, step=0.01, value=self.cutoff,
                                                   marks={0.01: '0.01', 0.25: '0.25', 0.50: '0.50',
                                                          0.75: '0.75', 0.99: '0.99'},
                                                   included=False,
                                                   tooltip={'always_visible': False}),
                                    ], style={'margin-bottom': 15},
                                        id='cutoffconnector-cutoff-div-'+self.name),
                                    dbc.Tooltip(f"Scores above this cutoff will be labeled positive",
                                                target='cutoffconnector-cutoff-div-'+self.name,
                                                placement='bottom'),
                                ]), hide=self.hide_cutoff),
                        ]),
                        dbc.Row([
                            make_hideable(
                                dbc.Col([
                                    html.Div([
                                        html.Label('Cutoff percentile of samples:'),
                                        dcc.Slider(id='cutoffconnector-percentile-'+self.name,
                                                   min=0.01, max=0.99, step=0.01, value=self.percentile,
                                                   marks={0.01: '0.01', 0.25: '0.25', 0.50: '0.50',
                                                          0.75: '0.75', 0.99: '0.99'},
                                                   included=False,
                                                   tooltip={'always_visible': False}),
                                    ], style={'margin-bottom': 15},
                                        id='cutoffconnector-percentile-div-'+self.name),
                                    dbc.Tooltip(f"example: if set to percentile=0.9: label the top 10% highest scores as positive, the rest negative.",
                                                target='cutoffconnector-percentile-div-'+self.name,
                                                placement='bottom'),
                                ]), hide=self.hide_percentile),
                        ])
                    ]),
                    make_hideable(
                        dbc.Col([
                            self.selector.layout()
                        ], width=2), hide=self.hide_selector),
                ])
            ])
        ])

    def component_callbacks(self, app):
        # when the percentile slider moves, recompute and update the cutoff slider
        @app.callback(
            Output('cutoffconnector-cutoff-'+self.name, 'value'),
            [Input('cutoffconnector-percentile-'+self.name, 'value'),
             Input('pos-label-'+self.name, 'value')]
        )
        def update_cutoff(percentile, pos_label):
            if percentile is not None:
                return np.round(self.explainer.cutoff_from_percentile(percentile, pos_label=pos_label), 2)
            raise PreventUpdate


class PosLabelConnector(ExplainerComponent):
    """Connect a pos-label selector to the pos-label selectors of other components."""

    def __init__(self, input_pos_label, output_pos_labels):
        self.input_pos_label_name = self._get_pos_label(input_pos_label)
        self.output_pos_label_names = self._get_pos_labels(output_pos_labels)
        # if self.input_pos_label_name in self.output_pos_label_names:
        #     # avoid circular callbacks
        #     self.output_pos_label_names.remove(self.input_pos_label_name)

    def _get_pos_label(self, input_pos_label):
        """Resolve input to a single pos-label element id string."""
        if isinstance(input_pos_label, PosLabelSelector):
            return 'pos-label-' + input_pos_label.name
        elif hasattr(input_pos_label, 'selector') and isinstance(input_pos_label.selector, PosLabelSelector):
            return 'pos-label-' + input_pos_label.selector.name
        elif isinstance(input_pos_label, str):
            return input_pos_label
        else:
            raise ValueError("input_pos_label should either be a str, "
                             "PosLabelSelector or an instance with a .selector property"
                             " that is a PosLabelSelector!")

    def _get_pos_labels(self, output_pos_labels):
        """Resolve outputs to a list of pos-label element id strings."""
        def get_pos_labels(o):
            if isinstance(o, PosLabelSelector):
                return ['pos-label-'+o.name]
            elif isinstance(o, str):
                # bug fix: originally returned [str] (the built-in type)
                # instead of the string itself
                return [o]
            elif hasattr(o, 'pos_labels'):
                return o.pos_labels
            return []

        if hasattr(output_pos_labels, '__iter__'):
            pos_labels = []
            for comp in output_pos_labels:
                pos_labels.extend(get_pos_labels(comp))
            return list(set(pos_labels))
        else:
            return get_pos_labels(output_pos_labels)

    def component_callbacks(self, app):
        if self.output_pos_label_names:
            @app.callback(
                [Output(pos_label_name, 'value') for pos_label_name in self.output_pos_label_names],
                [Input(self.input_pos_label_name, 'value')]
            )
            def update_pos_labels(pos_label):
                return tuple(pos_label for i in range(len(self.output_pos_label_names)))


class CutoffConnector(ExplainerComponent):
    def __init__(self, input_cutoff, output_cutoffs):
        """Connect the cutoff selector of input_cutoff with those of output_cutoffs.

        You can use this to connect a CutoffPercentileComponent with a
        RocAucComponent for example. When you change the cutoff in input_cutoff,
        all the cutoffs in output_cutoffs will automatically be updated.

        Args:
            input_cutoff ([{str, ExplainerComponent}]): Either a str or an
                        ExplainerComponent. If str should be equal to the name
                        of the cutoff property. If ExplainerComponent then
                        should have a .cutoff_name property.
            output_cutoffs (list(str, ExplainerComponent)): list of str of
                        ExplainerComponents.
        """
        self.input_cutoff_name = self.cutoff_name(input_cutoff)
        self.output_cutoff_names = self.cutoff_name(output_cutoffs)
        if not isinstance(self.output_cutoff_names, list):
            self.output_cutoff_names = [self.output_cutoff_names]

    @staticmethod
    def cutoff_name(cutoffs):
        """Resolve a component (or list of components) to cutoff element id(s)."""
        def get_cutoff_name(o):
            if isinstance(o, str):
                return o
            elif isinstance(o, ExplainerComponent):
                if not hasattr(o, "cutoff_name"):
                    raise ValueError(f"{o} does not have an .cutoff_name property!")
                return o.cutoff_name
            raise ValueError(f"{o} is neither str nor an ExplainerComponent with an .cutoff_name property")

        if hasattr(cutoffs, '__iter__'):
            cutoff_name_list = []
            for cutoff in cutoffs:
                cutoff_name_list.append(get_cutoff_name(cutoff))
            return cutoff_name_list
        else:
            return get_cutoff_name(cutoffs)

    def component_callbacks(self, app):
        @app.callback(
            [Output(cutoff_name, 'value') for cutoff_name in self.output_cutoff_names],
            [Input(self.input_cutoff_name, 'value')]
        )
        def update_cutoffs(cutoff):
            return tuple(cutoff for i in range(len(self.output_cutoff_names)))


class IndexConnector(ExplainerComponent):
    def __init__(self, input_index, output_indexes):
        """Connect the index selector of input_index with those of output_indexes.

        You can use this to connect a RandomIndexComponent with a
        PredictionSummaryComponent for example. When you change the index in
        input_index, all the indexes in output_indexes will automatically be
        updated.

        Args:
            input_index ([{str, ExplainerComponent}]): Either a str or an
                        ExplainerComponent. If str should be equal to the name
                        of the index property. If ExplainerComponent then
                        should have a .index_name property.
            output_indexes (list(str, ExplainerComponent)): list of str of
                        ExplainerComponents.
        """
        self.input_index_name = self.index_name(input_index)
        self.output_index_names = self.index_name(output_indexes)
        if not isinstance(self.output_index_names, list):
            self.output_index_names = [self.output_index_names]

    @staticmethod
    def index_name(indexes):#, multi=False):
        """Resolve a component (or list of components) to index element id(s)."""
        def get_index_name(o):
            if isinstance(o, str):
                return o
            elif isinstance(o, ExplainerComponent):
                if not hasattr(o, "index_name"):
                    raise ValueError(f"{o} does not have an .index_name property!")
                return o.index_name
            raise ValueError(f"{o} is neither str nor an ExplainerComponent with an .index_name property")

        if hasattr(indexes, '__iter__'):
            index_name_list = []
            for index in indexes:
                index_name_list.append(get_index_name(index))
            return index_name_list
        else:
            return get_index_name(indexes)

    def component_callbacks(self, app):
        @app.callback(
            [Output(index_name, 'value') for index_name in self.output_index_names],
            [Input(self.input_index_name, 'value')]
        )
        def update_indexes(index):
            return tuple(index for i in range(len(self.output_index_names)))


class HighlightConnector(ExplainerComponent):
    def __init__(self, input_highlight, output_highlights):
        """Connect the highlight selector of input_highlight with those of
        output_highlights.

        You can use this to connect a DecisionTreesComponent component to a
        DecisionPathGraphComponent for example. When you change the highlight
        in input_highlight, all the highlights in output_highlights will
        automatically be updated.

        Args:
            input_highlight ([{str, ExplainerComponent}]): Either a str or an
                        ExplainerComponent. If str should be equal to the name
                        of the highlight property. If ExplainerComponent then
                        should have a .highlight_name property.
            output_highlights (list(str, ExplainerComponent)): list of str of
                        ExplainerComponents.
        """
        self.input_highlight_name = self.highlight_name(input_highlight)
        self.output_highlight_names = self.highlight_name(output_highlights)
        if not isinstance(self.output_highlight_names, list):
            self.output_highlight_names = [self.output_highlight_names]

    @staticmethod
    def highlight_name(highlights):
        """Resolve a component (or list of components) to highlight element id(s)."""
        def get_highlight_name(o):
            if isinstance(o, str):
                return o
            elif isinstance(o, ExplainerComponent):
                if not hasattr(o, "highlight_name"):
                    raise ValueError(f"{o} does not have an .highlight_name property!")
                return o.highlight_name
            raise ValueError(f"{o} is neither str nor an ExplainerComponent with an .highlight_name property")

        if hasattr(highlights, '__iter__'):
            highlight_name_list = []
            for highlight in highlights:
                highlight_name_list.append(get_highlight_name(highlight))
            return highlight_name_list
        else:
            return get_highlight_name(highlights)

    def component_callbacks(self, app):
        @app.callback(
            [Output(highlight_name, 'value') for highlight_name in self.output_highlight_names],
            [Input(self.input_highlight_name, 'value')])
        def update_highlights(highlight):
            return tuple(highlight for i in range(len(self.output_highlight_names)))
PypiClean
/FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ckeditor/plugins/codemirror/js/mode/dtd/dtd.js
// CodeMirror mode for DTD (document type definitions).
// Reconstructed formatting: the original text had all newlines collapsed into
// spaces; tokens are unchanged, only whitespace and comments were restored/added.
(function(mod) {
  if (typeof exports == "object" && typeof module == "object") // CommonJS
    mod(require("../../lib/codemirror"));
  else if (typeof define == "function" && define.amd) // AMD
    define(["../../lib/codemirror"], mod);
  else // Plain browser env
    mod(CodeMirror);
})(function(CodeMirror) {
"use strict";

CodeMirror.defineMode("dtd", function(config) {
  var indentUnit = config.indentUnit, type;
  // Record the token "type" (used by indent/stack logic) and return its style.
  function ret(style, tp) {type = tp; return style;}

  function tokenBase(stream, state) {
    var ch = stream.next();

    if (ch == "<" && stream.eat("!") ) {
      if (stream.eatWhile(/[\-]/)) {
        state.tokenize = tokenSGMLComment;
        return tokenSGMLComment(stream, state);
      } else if (stream.eatWhile(/[\w]/)) return ret("keyword", "doindent");
    } else if (ch == "<" && stream.eat("?")) { //xml declaration
      state.tokenize = inBlock("meta", "?>");
      return ret("meta", ch);
    } else if (ch == "#" && stream.eatWhile(/[\w]/)) return ret("atom", "tag");
    else if (ch == "|") return ret("keyword", "seperator");
    else if (ch.match(/[\(\)\[\]\-\.,\+\?>]/)) return ret(null, ch);//if(ch === ">") return ret(null, "endtag"); else
    // NOTE(review): this branch is unreachable — "[" and "]" are already
    // matched by the character class in the branch above. Kept as-is to
    // preserve behavior; confirm intent before removing or reordering.
    else if (ch.match(/[\[\]]/)) return ret("rule", ch);
    else if (ch == "\"" || ch == "'") {
      state.tokenize = tokenString(ch);
      return state.tokenize(stream, state);
    } else if (stream.eatWhile(/[a-zA-Z\?\+\d]/)) {
      var sc = stream.current();
      // Trailing "?" or "+" is an occurrence indicator, not part of the name.
      if( sc.substr(sc.length-1,sc.length).match(/\?|\+/) !== null )stream.backUp(1);
      return ret("tag", "tag");
    } else if (ch == "%" || ch == "*" ) return ret("number", "number");
    else {
      stream.eatWhile(/[\w\\\-_%.{,]/);
      return ret(null, null);
    }
  }

  // Consume an SGML comment; hands control back to tokenBase after "-->".
  function tokenSGMLComment(stream, state) {
    var dashes = 0, ch;
    while ((ch = stream.next()) != null) {
      if (dashes >= 2 && ch == ">") {
        state.tokenize = tokenBase;
        break;
      }
      dashes = (ch == "-") ? dashes + 1 : 0;
    }
    return ret("comment", "comment");
  }

  // Tokenizer for a quoted string delimited by the given quote character.
  function tokenString(quote) {
    return function(stream, state) {
      var escaped = false, ch;
      while ((ch = stream.next()) != null) {
        if (ch == quote && !escaped) {
          state.tokenize = tokenBase;
          break;
        }
        escaped = !escaped && ch == "\\";
      }
      return ret("string", "tag");
    };
  }

  // Tokenizer that styles everything up to the given terminator (e.g. "?>").
  function inBlock(style, terminator) {
    return function(stream, state) {
      while (!stream.eol()) {
        if (stream.match(terminator)) {
          state.tokenize = tokenBase;
          break;
        }
        stream.next();
      }
      return style;
    };
  }

  return {
    startState: function(base) {
      return {tokenize: tokenBase,
              baseIndent: base || 0,
              stack: []};
    },

    token: function(stream, state) {
      if (stream.eatSpace()) return null;
      var style = state.tokenize(stream, state);

      var context = state.stack[state.stack.length-1];
      if (stream.current() == "[" || type === "doindent" || type == "[") state.stack.push("rule");
      else if (type === "endtag") state.stack[state.stack.length-1] = "endtag";
      else if (stream.current() == "]" || type == "]" || (type == ">" && context == "rule")) state.stack.pop();
      else if (type == "[") state.stack.push("[");
      return style;
    },

    indent: function(state, textAfter) {
      var n = state.stack.length;

      // NOTE(review): bare expressions like `n;` below are no-ops that only
      // document "leave n unchanged" branches; preserved verbatim.
      if( textAfter.match(/\]\s+|\]/) )n=n-1;
      else if(textAfter.substr(textAfter.length-1, textAfter.length) === ">"){
        if(textAfter.substr(0,1) === "<")n;
        else if( type == "doindent" && textAfter.length > 1 )n;
        else if( type == "doindent")n--;
        else if( type == ">" && textAfter.length > 1)n;
        else if( type == "tag" && textAfter !== ">")n;
        else if( type == "tag" && state.stack[state.stack.length-1] == "rule")n--;
        else if( type == "tag")n++;
        else if( textAfter === ">" && state.stack[state.stack.length-1] == "rule" && type === ">")n--;
        else if( textAfter === ">" && state.stack[state.stack.length-1] == "rule")n;
        else if( textAfter.substr(0,1) !== "<" && textAfter.substr(0,1) === ">" )n=n-1;
        else if( textAfter === ">")n;
        else n=n-1;
        //over rule them all
        if(type == null || type == "]")n--;
      }

      return state.baseIndent + n * indentUnit;
    },

    electricChars: "]>"
  };
});

CodeMirror.defineMIME("application/xml-dtd", "dtd");

});
PypiClean
/FreeClimb-4.5.0-py3-none-any.whl/freeclimb/model/remove_from_conference.py
import re  # noqa: F401
import sys  # noqa: F401

from freeclimb.model_utils import (  # noqa: F401
    ApiTypeError,
    ModelComposed,
    ModelNormal,
    ModelSimple,
    cached_property,
    change_keys_js_to_python,
    convert_js_args_to_python_args,
    date,
    datetime,
    file_type,
    none_type,
    validate_get_composed_info,
    OpenApiModel
)
from freeclimb.exceptions import ApiAttributeError


def lazy_import():
    # Imported lazily (and published through globals()) to break the circular
    # dependency between the PerCL command model modules; validate_get_composed_info
    # resolves composed-schema class names through globals() at call time.
    from freeclimb.model.add_to_conference import AddToConference
    from freeclimb.model.create_conference import CreateConference
    from freeclimb.model.dequeue import Dequeue
    from freeclimb.model.enqueue import Enqueue
    from freeclimb.model.get_digits import GetDigits
    from freeclimb.model.get_speech import GetSpeech
    from freeclimb.model.hangup import Hangup
    from freeclimb.model.out_dial import OutDial
    from freeclimb.model.park import Park
    from freeclimb.model.pause import Pause
    from freeclimb.model.percl_command import PerclCommand
    from freeclimb.model.play import Play
    from freeclimb.model.play_early_media import PlayEarlyMedia
    from freeclimb.model.record_utterance import RecordUtterance
    from freeclimb.model.redirect import Redirect
    from freeclimb.model.reject import Reject
    from freeclimb.model.remove_from_conference import RemoveFromConference
    from freeclimb.model.remove_from_conference_all_of import RemoveFromConferenceAllOf
    from freeclimb.model.say import Say
    from freeclimb.model.send_digits import SendDigits
    from freeclimb.model.set_listen import SetListen
    from freeclimb.model.set_talk import SetTalk
    from freeclimb.model.sms import Sms
    from freeclimb.model.start_record_call import StartRecordCall
    from freeclimb.model.terminate_conference import TerminateConference
    from freeclimb.model.unpark import Unpark
    globals()['AddToConference'] = AddToConference
    globals()['CreateConference'] = CreateConference
    globals()['Dequeue'] = Dequeue
    globals()['Enqueue'] = Enqueue
    globals()['GetDigits'] = GetDigits
    globals()['GetSpeech'] = GetSpeech
    globals()['Hangup'] = Hangup
    globals()['OutDial'] = OutDial
    globals()['Park'] = Park
    globals()['Pause'] = Pause
    globals()['PerclCommand'] = PerclCommand
    globals()['Play'] = Play
    globals()['PlayEarlyMedia'] = PlayEarlyMedia
    globals()['RecordUtterance'] = RecordUtterance
    globals()['Redirect'] = Redirect
    globals()['Reject'] = Reject
    globals()['RemoveFromConference'] = RemoveFromConference
    globals()['RemoveFromConferenceAllOf'] = RemoveFromConferenceAllOf
    globals()['Say'] = Say
    globals()['SendDigits'] = SendDigits
    globals()['SetListen'] = SetListen
    globals()['SetTalk'] = SetTalk
    globals()['Sms'] = Sms
    globals()['StartRecordCall'] = StartRecordCall
    globals()['TerminateConference'] = TerminateConference
    globals()['Unpark'] = Unpark


class RemoveFromConference(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'call_id': (str,),  # noqa: E501
            'command': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # Leaf of the composed-schema hierarchy: no further discrimination here.
        return None

    attribute_map = {
        'call_id': 'callId',  # noqa: E501
        'command': 'command',  # noqa: E501
    }

    read_only_vars = {
    }

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """RemoveFromConference - a model defined in OpenAPI

        Used when deserializing server data; unlike ``__init__`` it allows
        read-only attributes to be set.

        Keyword Args:
            call_id (str): ID of the Call leg to be removed from the
                Conference. The Call must be in a Conference or an error will
                be triggered.
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be raised if the
                wrong type is input. Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data when deserializing
                a response
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the OpenAPI
                document. False if the variable names in the input data are
                pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter. If passed, type conversion
                is attempted. If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of classes
                that we have traveled through so that if we see that class
                again we will not use its discriminator again. When traveling
                through a discriminator, the composed schema that is traveled
                through is added to this set.
            command (str): Name of PerCL Command (this is automatically
                derived from mapping configuration and should not be manually
                supplied in any arguments). [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass OpenApiModel.__new__ so deserialization does not re-trigger
        # discriminator resolution.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]

        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)

        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """RemoveFromConference - a model defined in OpenAPI

        Keyword Args:
            call_id (str): ID of the Call leg to be removed from the
                Conference. The Call must be in a Conference or an error will
                be triggered.
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be raised if the
                wrong type is input. Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data when deserializing
                a response
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the OpenAPI
                document. False if the variable names in the input data are
                pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter. If passed, type conversion
                is attempted. If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of classes
                that we have traveled through so that if we see that class
                again we will not use its discriminator again. When traveling
                through a discriminator, the composed schema that is traveled
                through is added to this set.
            command (str): Name of PerCL Command (this is automatically
                derived from mapping configuration and should not be manually
                supplied in any arguments). [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]

        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")

    @cached_property
    def command():
        # Derive this model's discriminator value by reverse lookup in
        # PerclCommand's discriminator mapping.
        mappings = PerclCommand.discriminator['command']
        mapping = next((mapping for mapping, schema in mappings.items()
                        if schema == RemoveFromConference), None)
        if mapping is None:
            # Fixed: was `RemoveFromConference.__class__.name` — the class of a
            # class is `type`, which has no `name` attribute, so raising this
            # error would itself crash with AttributeError.
            raise ApiAttributeError(
                "{0} has no mapping '{1}'".format(
                    RemoveFromConference.__name__, 'command'))
        return mapping

    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
            'anyOf': [
            ],
            'allOf': [
                PerclCommand,
                RemoveFromConferenceAllOf,
            ],
            'oneOf': [
            ],
        }
PypiClean
/Muntjac-1.1.2.tar.gz/Muntjac-1.1.2/muntjac/demo/sampler/features/table/TableStylingExample.py
from muntjac.demo.sampler.ExampleUtil import ExampleUtil
from muntjac.api import VerticalLayout, Table, Link, Button, Alignment
from muntjac.ui import button
from muntjac.event.action import Action
from muntjac.event import action
from muntjac.ui.table import IColumnGenerator, ICellStyleGenerator
from muntjac.terminal.external_resource import ExternalResource
from muntjac.event.item_click_event import IItemClickListener, ItemClickEvent


# Context-menu actions; each caption doubles as the CSS style name to apply.
ACTION_RED = Action('red')
ACTION_BLUE = Action('blue')
ACTION_GREEN = Action('green')
ACTION_NONE = Action('none')

ACTIONS = [ACTION_RED, ACTION_GREEN, ACTION_BLUE, ACTION_NONE]


class TableStylingExample(VerticalLayout):
    """Sampler demo: styling Table rows and cells via a cell-style generator,
    a generated column, a context menu, and double-click cell marking."""

    def __init__(self):
        super(TableStylingExample, self).__init__()
        self.setSpacing(True)

        self._table = Table()
        # itemId -> style name chosen from the context menu
        self._markedRows = dict()
        # itemId -> set of propertyIds toggled by double-click
        self._markedCells = dict()
        self.addComponent(self._table)

        # set a style name, so we can style rows and cells
        self._table.setStyleName('contacts')

        # size
        self._table.setWidth('100%')
        self._table.setPageLength(7)

        # connect data source
        self._table.setContainerDataSource(ExampleUtil.getPersonContainer())

        # Generate the email-link from firstname & lastname
        self._table.addGeneratedColumn('Email', TableColumnGenerator(self))

        # turn on column reordering and collapsing
        self._table.setColumnReorderingAllowed(True)
        self._table.setColumnCollapsingAllowed(True)

        # Actions (a.k.a context menu)
        self._table.addActionHandler(TableActionHandler(self))

        # style generator
        self._table.setCellStyleGenerator(TableStyleGenerator(self))

        # toggle cell 'marked' styling when double-clicked
        self._table.addListener(TableClickListener(self), IItemClickListener)

        # Editing
        # we don't want to update container before pressing 'save':
        self._table.setWriteThrough(False)

        # edit button
        editButton = Button('Edit')
        self.addComponent(editButton)
        editButton.addListener(EditListener(self, editButton),
                button.IClickListener)
        self.setComponentAlignment(editButton, Alignment.TOP_RIGHT)


class TableColumnGenerator(IColumnGenerator):
    """Builds the generated 'Email' column from the first and last name."""

    def __init__(self, c):
        self._c = c

    def generateCell(self, source, itemId, columnId):
        item = self._c._table.getItem(itemId)
        fn = item.getItemProperty(
                ExampleUtil.PERSON_PROPERTY_FIRSTNAME).getValue()
        ln = item.getItemProperty(
                ExampleUtil.PERSON_PROPERTY_LASTNAME).getValue()
        email = fn.lower() + '.' + ln.lower() + '@example.com'
        # the Link -component:
        emailLink = Link(email, ExternalResource('mailto:' + email))
        return emailLink


class TableActionHandler(action.IHandler):
    """Context-menu handler: records the chosen color per row."""

    def __init__(self, c):
        self._c = c

    def getActions(self, target, sender):
        return ACTIONS

    def handleAction(self, a, sender, target):
        if target in self._c._markedRows:
            del self._c._markedRows[target]
        if a != ACTION_NONE:
            # we're using the action's caption as stylename as well:
            self._c._markedRows[target] = a.getCaption()
        # this causes the CellStyleGenerator to return new styles,
        # but table can't automatically know, we must tell it:
        self._c._table.requestRepaint()


class TableStyleGenerator(ICellStyleGenerator):
    """Resolves a style name for each row and cell of the table."""

    def __init__(self, c):
        self._c = c

    def getStyle(self, itemId, propertyId):
        if propertyId is None:
            # no propertyId, styling row
            return self._c._markedRows.get(itemId)
        elif propertyId == 'Email':
            # style the generated email column
            return 'email'
        else:
            cells = self._c._markedCells.get(itemId)
            if cells is not None and propertyId in cells:
                return 'marked'  # marked cell
            else:
                return None  # no style


class TableClickListener(IItemClickListener):
    """Toggles per-cell 'marked' styling on double-click."""

    def __init__(self, c):
        self._c = c

    def itemClick(self, event):
        if event.getButton() == ItemClickEvent.BUTTON_RIGHT:
            # you can handle left/right/middle -mouseclick
            pass

        if event.isDoubleClick():
            itemId = event.getItemId()
            propertyId = event.getPropertyId()
            cells = self._c._markedCells.get(itemId)
            if cells is None:
                cells = set()
                self._c._markedCells[itemId] = cells
            if propertyId in cells:
                # toggle marking off
                cells.remove(propertyId)
            else:
                # toggle marking on
                cells.add(propertyId)
            # this causes the CellStyleGenerator to return new styles,
            # but table can't automatically know, we must tell it:
            self._c._table.requestRepaint()


class EditListener(button.IClickListener):
    """Switches the table between view and edit mode and relabels the button."""

    def __init__(self, c, editButton):
        self._c = c
        self._editButton = editButton

    def buttonClick(self, event):
        self._c._table.setEditable(not self._c._table.isEditable())
        if self._c._table.isEditable():
            self._editButton.setCaption('Save')
        else:
            self._editButton.setCaption('Edit')
PypiClean
/HTSQL-2.3.3.tar.gz/HTSQL-2.3.3/src/htsql/tweak/etl/cmd/merge.py
from ....core.util import listof from ....core.adapter import Utility, adapt from ....core.context import context from ....core.error import Error, PermissionError from ....core.entity import TableEntity, ColumnEntity from ....core.model import TableArc from ....core.classify import localize, relabel from ....core.connect import transaction, scramble, unscramble from ....core.domain import IdentityDomain, RecordDomain, ListDomain, Product from ....core.cmd.fetch import build_fetch from ....core.cmd.act import Act, ProduceAction, act from ....core.tr.bind import BindingState, Select from ....core.syn.syntax import VoidSyntax from ....core.tr.binding import (VoidBinding, RootBinding, FormulaBinding, LocateBinding, SelectionBinding, SieveBinding, AliasBinding, SegmentBinding, QueryBinding, FreeTableRecipe, ColumnRecipe) from ....core.tr.signature import IsEqualSig, AndSig, PlaceholderSig from ....core.tr.decorate import decorate from ....core.tr.coerce import coerce from ....core.tr.lookup import identify from .command import MergeCmd from .insert import (BuildExtractNode, BuildExtractTable, BuildExecuteInsert, BuildResolveIdentity, BuildResolveChain) from ..tr.dump import serialize_update import itertools class ExtractIdentityPipe(object): def __init__(self, node, arcs, id_indices, other_indices): self.node = node self.arcs = arcs self.id_indices = id_indices self.other_indices = other_indices def __call__(self, row): return (tuple(row[idx] for idx in self.id_indices), tuple(row[idx] for idx in self.other_indices)) class BuildExtractIdentity(Utility): def __init__(self, node, arcs): self.node = node self.arcs = arcs def __call__(self): identity_arcs = localize(self.node) if identity_arcs is None: raise Error("Expected a table with identity") index_by_arc = dict((arc, index) for index, arc in enumerate(self.arcs)) id_indices = [] for arc in identity_arcs: if arc not in index_by_arc: labels = relabel(arc) if not labels: raise Error("Missing identity field") else: label 
= labels[0] raise Error("Missing identity field %s" % label.name.encode('utf-8')) index = index_by_arc[arc] id_indices.append(index) other_indices = [] arcs = [] for idx, arc in enumerate(self.arcs): if arc in identity_arcs: continue other_indices.append(idx) arcs.append(arc) return ExtractIdentityPipe(self.node, arcs, id_indices, other_indices) class ResolveKeyPipe(object): def __init__(self, name, columns, domain, pipe, with_error): self.name = name self.columns = columns self.pipe = pipe self.domain = domain self.leaves = domain.leaves self.with_error = with_error def __call__(self, value): assert value is not None raw_values = [] for leaf in self.leaves: raw_value = value for idx in leaf: raw_value = raw_value[idx] raw_values.append(raw_value) product = self.pipe(raw_values) data = product.data assert len(data) <= 1 if data: return data[0] if self.with_error: quote = None if self.name: quote = u"%s[%s]" % (self.name, self.domain.dump(value)) else: quote = u"[%s]" % self.domain.dump(value) raise Error("Unable to find an entity", quote) return None class BuildResolveKey(Utility): def __init__(self, node, with_error=True): self.node = node self.table = node.table self.with_error = with_error def __call__(self): labels = relabel(TableArc(self.table)) name = labels[0].name if labels else None state = BindingState() syntax = VoidSyntax() scope = RootBinding(syntax) state.set_root(scope) seed = state.use(FreeTableRecipe(self.table), syntax) recipe = identify(seed) if recipe is None: raise Error("Cannot determine identity of a link") identity = state.use(recipe, syntax, scope=seed) count = itertools.count() def make_images(identity): images = [] for field in identity.elements: if isinstance(field.domain, IdentityDomain): images.extend(make_images(field)) else: item = FormulaBinding(scope, PlaceholderSig(next(count)), field.domain, syntax) images.append((item, field)) return images images = make_images(identity) scope = LocateBinding(scope, seed, images, None, syntax) 
state.push_scope(scope) columns = [] if self.table.primary_key is not None: columns = self.table.primary_key.origin_columns else: for key in self.table.unique_keys: if key.is_partial: continue if all(not column.is_nullable for column in key.origin_columns): rcolumns = key.origin_columns break if not columns: raise Error("Table does not have a primary key") elements = [] for column in columns: binding = state.use(ColumnRecipe(column), syntax) elements.append(binding) fields = [decorate(element) for element in elements] domain = RecordDomain(fields) scope = SelectionBinding(scope, elements, domain, syntax) binding = Select.__invoke__(scope, state) domain = ListDomain(binding.domain) binding = SegmentBinding(state.root, binding, domain, syntax) profile = decorate(binding) binding = QueryBinding(state.root, binding, profile, syntax) pipe = build_fetch(binding) domain = identity.domain return ResolveKeyPipe(name, columns, domain, pipe, self.with_error) class ExecuteUpdatePipe(object): def __init__(self, table, input_columns, key_columns, output_columns, sql): assert isinstance(table, TableEntity) assert isinstance(input_columns, listof(ColumnEntity)) assert isinstance(key_columns, listof(ColumnEntity)) assert isinstance(output_columns, listof(ColumnEntity)) assert isinstance(sql, unicode) self.table = table self.input_columns = input_columns self.key_columns = key_columns self.output_columns = output_columns self.sql = sql self.input_converts = [scramble(column.domain) for column in input_columns] self.key_converts = [scramble(column.domain) for column in key_columns] self.output_converts = [unscramble(column.domain) for column in output_columns] def __call__(self, key_row, row): key_row = tuple(convert(item) for item, convert in zip(key_row, self.key_converts)) row = tuple(convert(item) for item, convert in zip(row, self.input_converts)) if not row: return key_row if not context.env.can_write: raise PermissionError("No write permissions") with transaction() as 
connection: cursor = connection.cursor() cursor.execute(self.sql.encode('utf-8'), row+key_row) rows = cursor.fetchall() if len(rows) != 1: raise Error("Unable to locate the updated row") [row] = rows return row class BuildExecuteUpdate(Utility): def __init__(self, table, columns): assert isinstance(table, TableEntity) assert isinstance(columns, listof(ColumnEntity)) self.table = table self.columns = columns def __call__(self): table = self.table returning_columns = [] if table.primary_key is not None: returning_columns = table.primary_key.origin_columns else: for key in table.unique_keys: if key.is_partial: continue if all(not column.is_nullable for column in key.origin_columns): returning_columns = key.origin_columns break if not returning_columns: raise Error("Table does not have a primary key") sql = serialize_update(table, self.columns, returning_columns, returning_columns) return ExecuteUpdatePipe(table, self.columns, returning_columns, returning_columns, sql) class ProduceMerge(Act): adapt(MergeCmd, ProduceAction) def __call__(self): with transaction() as connection: product = act(self.command.feed, self.action) extract_node = BuildExtractNode.__invoke__(product.meta) extract_table = BuildExtractTable.__invoke__( extract_node.node, extract_node.arcs) extract_identity = BuildExtractIdentity.__invoke__( extract_node.node, extract_node.arcs) resolve_key = BuildResolveKey.__invoke__( extract_node.node, False) extract_table_for_update = BuildExtractTable.__invoke__( extract_identity.node, extract_identity.arcs) execute_insert = BuildExecuteInsert.__invoke__( extract_table.table, extract_table.columns) execute_update = BuildExecuteUpdate.__invoke__( extract_table_for_update.table, extract_table_for_update.columns) resolve_identity = BuildResolveIdentity.__invoke__( execute_insert.table, execute_insert.output_columns, extract_node.is_list) meta = resolve_identity.profile data = [] if extract_node.is_list: records = product.data record_domain = 
product.meta.domain.item_domain else: records = [product.data] record_domain = product.meta.domain for idx, record in enumerate(records): if record is None: continue try: row = extract_node(record) update_id, update_row = extract_identity(row) key = resolve_key(update_id) if key is not None: row = extract_table_for_update(update_row) key = execute_update(key, row) else: row = extract_table(row) key = execute_insert(row) row = resolve_identity(key) except Error, exc: if extract_node.is_list: message = "While merging record #%s" % (idx+1) else: message = "While merging a record" quote = record_domain.dump(record) exc.wrap(message, quote) raise data.append(row) if not extract_node.is_list: assert len(data) <= 1 if data: data = data[0] else: data = None return Product(meta, data)
PypiClean
/Aries-Python-0.1.330.tar.gz/Aries-Python-0.1.330/Aries/storage/gs.py
import os import logging import warnings import tempfile import base64 import binascii from functools import wraps from google.cloud import storage from google.cloud.exceptions import ServerError, Forbidden from ..strings import Base64String from ..tasks import FunctionTask from .base import StorageFolderBase from .cloud import BucketStorageObject, CloudStoragePrefix, CloudStorageIO logger = logging.getLogger(__name__) def setup_credentials(env_name, to_json_file=None): """Configures the GOOGLE_APPLICATION_CREDENTIALS by saving the value of an environment variable to a JSON file. """ # Use the b64 encoded content as credentials if "GOOGLE_CREDENTIALS" is set. credentials = os.environ.get(env_name) if credentials and credentials.startswith("ew"): if not to_json_file: temp_file = tempfile.NamedTemporaryFile(suffix=".json", delete=False) temp_file.close() to_json_file = temp_file.name Base64String(credentials).decode_to_file(to_json_file) # Set "GOOGLE_APPLICATION_CREDENTIALS" if json file exists. if os.path.exists(to_json_file): os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = to_json_file def api_call(func=None, *args, **kwargs): """Makes API call and retry if there is an exception. This is designed to resolve the 500 Backend Error from Google. Args: func (callable): A function or method. Examples: api_call(self.bucket.get_blob, self.prefix) See Also: https://developers.google.com/drive/api/v3/handle-errors#resolve_a_500_error_backend_error """ if not func: return None # logger.debug("Making API call: %s..." % func.__name__) with warnings.catch_warnings(): warnings.simplefilter("ignore", ResourceWarning) warnings.simplefilter("ignore", UserWarning) return FunctionTask(func, *args, **kwargs).run_and_retry( max_retry=3, exceptions=ServerError, base_interval=60, retry_pattern='linear', capture_output=False ) def api_decorator(method): """Decorator for making API call and retry if there is an exception. This is designed to resolve the 500 Backend Error from Google. 
When the decorated function is called, the function call will be retry if there is an exception. Examples: @api_decorator def exists(self): return self.blob.exists """ # logger.debug("Decorating %s for API call..." % method.__name__) @wraps(method) def wrapper(*method_args, **method_kwargs): return api_call(method, *method_args, **method_kwargs) return wrapper class GSObject(BucketStorageObject): """The base class for Google Storage Object. """ MAX_BATCH_SIZE = 900 @property def blob(self): """Gets or initialize a Google Cloud Storage Blob. Returns: A Google Cloud Storage Blob object. This does not check whether the object exists. Use blob.exists() to determine whether or not the blob exists. """ if self._blob is None: # logger.debug("Getting blob: %s" % self.uri) # The following line will avoid sending a GET request # but bucket object will not have the real metadata bucket = self.client.bucket(self.bucket_name) file_blob = api_call(bucket.get_blob, self.prefix) if file_blob is None: # The following will not make an HTTP request. # It simply instantiates a blob object owned by this bucket. # See https://googleapis.github.io/google-cloud-python/latest/storage/buckets.html # #google.cloud.storage.bucket.Bucket.blob file_blob = self.bucket.blob(self.prefix) self._blob = file_blob return self._blob @api_decorator def init_client(self): return storage.Client() @api_decorator def init_bucket(self): try: # The get_bucket() method requires permission to access the bucket. # The permission is not needed for getting the size or downloading the file. 
bucket = self.client.get_bucket(self.bucket_name) except Forbidden: # Fallback to initializing a bucket object without sending GET request logger.debug("Account does not have permission to access bucket: %s" % self.bucket_name) bucket = self.client.bucket(self.bucket_name) return bucket @property def gs_path(self): return self.uri @api_decorator def exists(self): """Determines if there is an actual blob corresponds to this object. """ return self.blob.exists() @api_decorator def create(self): """Creates an empty blob, if the blob does not exist. Returns: Blob: The Google Cloud Storage blob. """ blob = storage.Blob(self.prefix, self.bucket) if not blob.exists(): blob.upload_from_string("") return blob def delete(self): self.delete_blob(self.blob) def copy(self, to): self.copy_blob(self.blob, to) def copy_blob(self, blob, to): """Copies a blob object in the bucket to a new location. Args: blob: A Google Cloud Storage Blob object in the bucket. to: URI of the new blob (gs://...). Returns: True if the blob is copied. Otherwise False. """ destination = GSObject(to) new_name = str(blob.name).replace(self.prefix, destination.prefix, 1) if new_name != str(blob.name) or self.bucket_name != destination.bucket_name: self.bucket.copy_blob(blob, destination.bucket, new_name) return True return False @staticmethod def delete_blob(blob): blob.delete() class GSPrefix(CloudStoragePrefix, GSObject): # @api_decorator def batch_request(self, blobs, method, *args, **kwargs): """Sends a batch request to run method of a batch of blobs. The "method" will be applied to each blob in blobs like method(blob, *args, **kwargs) Args: blobs: A list of blobs, to be processed in a SINGLE batch. method: The method for processing each blob. *args: Additional arguments for method. **kwargs: Keyword arguments for method. 
Returns: """ if not blobs: return 0 counter = 0 try: with self.client.batch(): for blob in blobs: method(blob, *args, **kwargs) counter += 1 except ValueError as ex: # Suppress the no deferred request errors # This error occurs when there is no file/blob in the batch. if str(ex).strip() == "No deferred requests": return 0 raise ex return counter # @api_decorator def batch_operation(self, method, *args, **kwargs): blobs = self.blobs() batch = [] counter = 0 for blob in blobs: batch.append(blob) if len(batch) > self.MAX_BATCH_SIZE: counter += self.batch_request(batch, method, *args, **kwargs) batch = [] if batch: counter += self.batch_request(batch, method, *args, **kwargs) return counter @api_decorator def blobs(self, delimiter=None): """Gets the blobs in the bucket having the prefix. The returning list will contain object in the folder and all sub-folders Args: delimiter: Use this to emulate hierarchy. If delimiter is None, the returning list will contain objects in the folder and in all sub-directories. Set delimiter to "/" to eliminate files in sub-directories. Returns: A list of GCS blobs. 
See Also: https://googleapis.github.io/google-cloud-python/latest/storage/blobs.html """ return list(self.bucket.list_blobs(prefix=self.prefix, delimiter=delimiter)) @property def uri_list(self): """Gets all file URIs with the prefix """ return [ "gs://%s/%s" % (self.bucket_name, b.name) for b in self.blobs() if not b.name.endswith("/") ] @property def files(self): from .io import StorageFile storage_files = [] for b in self.blobs("/"): if b.name.endswith("/"): continue storage_file = StorageFile("gs://%s/%s" % (self.bucket_name, b.name)) storage_file.raw_io._blob = b storage_files.append(storage_file) return storage_files @property def folders(self): return self.list_folders() @api_decorator def list_folders(self): from .io import StorageFolder iterator = self.bucket.list_blobs(prefix=self.prefix, delimiter='/') list(iterator) return [ StorageFolder("gs://%s/%s" % (self.bucket_name, p)) for p in iterator.prefixes ] def exists(self): return True if self.blob.exists() or self.objects else False @api_decorator def delete(self): """Deletes all objects with the same prefix.""" counter = self.batch_operation(self.delete_blob) logger.debug("%d files deleted." % counter) return counter @api_decorator def copy(self, to, contents_only=False): """Copies folder/file in a Google Cloud storage directory to another one. Args: to (str): Destination Google Cloud Storage path. contents_only: Copies only the content of the folder. This applies only if the GSObject is a folder. Defaults to False, i.e. a folder (with the same name as this folder) will be created at the destination to contain the files. Returns: The number of files copied. Warnings: When the URI of GSObject ends with "/", i.e. it is a folder, use "contents_only" to indicate if a new folder should be created to contain all files copied. When the GSObject is a file or a set of filtered files with the same prefix: If to ends with slash ("/"), all files will be copied under the "to" folder. 
folders partially in the prefix will be kept. If to does NOT end with slash, the prefix of all files will simply be replaced with the prefix in "to". See the following examples for more details. Example: Either GSFolder("gs://bucket_a/alpha/beta/").copy("gs://bucket_b/x/y") or GSFolder("gs://bucket_a/alpha/beta/").copy("gs://bucket_b/x/y/") will copy the following files: gs://bucket_a/alpha/beta/gamma/example.txt gs://bucket_a/alpha/beta/example.txt to gs://bucket_b/x/y/beta/gamma/example.txt gs://bucket_b/x/y/beta/example.txt Either GSFolder("gs://bucket_a/alpha/beta/").copy("gs://bucket_b/x/y", contents_only=True) or GSFolder("gs://bucket_a/alpha/beta/").copy("gs://bucket_b/x/y/", contents_only=True) will copy the following files: gs://bucket_a/alpha/beta/gamma/example.txt gs://bucket_a/alpha/beta/example.txt to gs://bucket_b/x/y/gamma/example.txt gs://bucket_b/x/y/example.txt Also GSFolder("gs://bucket_a/alpha/be").copy("gs://bucket_b/x/y/") will copy the following files: gs://bucket_a/alpha/beta/gamma/example.txt gs://bucket_a/alpha/beta/example.txt to gs://bucket_b/x/y/beta/gamma/example.txt gs://bucket_b/x/y/beta/example.txt However GSFolder("gs://bucket_a/alpha/be").copy("gs://bucket_b/x/y") will copy the following files: gs://bucket_a/alpha/beta/gamma/example.txt gs://bucket_a/alpha/beta/example.txt to gs://bucket_b/x/yta/gamma/example.txt gs://bucket_b/x/yta/example.txt """ # Check if the destination is a bucket root. # Prefix will be empty if destination is bucket root. # Always append "/" to bucket root. if not GSObject(to).prefix and not to.endswith("/"): to += "/" if self.prefix.endswith("/"): # The source is a folder if its prefix ends with "/" if contents_only: to += "/" else: # Copy the contents into a folder with the same name. to = os.path.join(to, self.name) + "/" else: # Otherwise, it can be a file or an object or a set of filtered objects or a folder. 
if to.endswith("/"): # If the destination ends with "/", # copy all objects under the destination to += self.name else: # If the destination does not end with "/", # simply replace the prefix. pass # logger.debug("Copying files to %s" % to) source_files = self.blobs() if not source_files: logger.debug("No files in %s" % self.uri) return 0 counter = self.batch_operation(self.copy_blob, to) logger.debug("%d files copied." % counter) return counter class GSFolder(GSPrefix, StorageFolderBase): """Represents a Google Cloud Storage Folder Method Resolution Order: GSFolder, GSObject, StorageFolder, StorageObject """ def __init__(self, uri): """Initializes a Google Cloud Storage Directory. Args: uri: The path of the object, e.g. "gs://bucket_name/path/to/dir/". """ # super() will call the __init__() of StorageObject, StorageFolder and GSObject GSObject.__init__(self, uri) StorageFolderBase.__init__(self, uri) # Make sure prefix ends with "/", otherwise it is not a "folder" if self.prefix and not self.prefix.endswith("/"): self.prefix += "/" def exists(self): return True if self.blob.exists() or self.file_paths or self.folder_paths else False @property def folder_paths(self): """Folders(Directories) in the directory. 
""" return self.__folders_paths() @api_decorator def __folders_paths(self): iterator = self.bucket.list_blobs(prefix=self.prefix, delimiter='/') list(iterator) return [ "gs://%s/%s" % (self.bucket_name, p) for p in iterator.prefixes ] @property def file_paths(self): """Files in the directory """ paths = self.__file_paths() return paths def __file_paths(self): return [ "gs://%s/%s" % (self.bucket_name, b.name) for b in self.blobs("/") if not b.name.endswith("/") ] @api_decorator def filter_files(self, prefix): return [ GSFile("gs://%s/%s" % (self.bucket_name, b.name)) for b in self.bucket.list_blobs(prefix=os.path.join(self.prefix, prefix), delimiter='/') if not b.name.endswith("/") ] class GSFile(GSObject, CloudStorageIO): def __init__(self, uri): """Represents a file on Google Cloud Storage as a file-like object implementing the IOBase interface. Args: uri: GSFile allows seek and read without opening the file. However, position/offset will be reset when open() is called. The context manager calls open() when enter. """ GSObject.__init__(self, uri) CloudStorageIO.__init__(self, uri) @property def updated_time(self): return self.blob.updated @property def md5_hex(self): return binascii.hexlify(base64.urlsafe_b64decode(self.blob.md5_hash)).decode() def get_size(self): return self.blob.size def read_bytes(self, start, end): return api_call(self.blob.download_as_bytes, start=start, end=end) def download(self, to_file_obj): api_call(self.blob.download_to_filename, to_file_obj.name) return to_file_obj def upload(self, from_file_obj): api_call(self.blob.upload_from_file, from_file_obj)
PypiClean
/AN-DiscordBot-3.9.4.tar.gz/AN-DiscordBot-3.9.4/anbot/__main__.py
# Discord Version check

import sys
import discord
from anbot.core.bot import AN, ExitCodes
from anbot.core.data_manager import create_temp_config, load_basic_configuration, config_file
from anbot.core.json_io import JsonIO
from anbot.core.global_checks import init_global_checks
from anbot.core.events import init_events
from anbot.core.cli import interactive_config, confirm, parse_cli_flags, ask_sentry
from anbot.core.core_commands import Core
from anbot.core import __version__
import asyncio
import logging.handlers
import logging
import os

# Let's not force this dependency, uvloop is much faster on cpython
if sys.implementation.name == "cpython":
    try:
        import uvloop
    except ImportError:
        pass
    else:
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

if sys.platform == "win32":
    asyncio.set_event_loop(asyncio.ProactorEventLoop())

#
#               AN - Discord Bot v3
#
#         Made by Aditya Nugraha, improved by many
#


def init_loggers(cli_flags):
    """Sets up logging for discord.py and the bot.

    The discord.py logger only emits WARNING and above to the console.
    The bot logger writes to both stdout and a rotating file
    (``an.log``, 10 MB per file, 5 backups) inside the core data path.

    Args:
        cli_flags: Parsed CLI flags; ``cli_flags.debug`` switches the bot
            logger to DEBUG and enables asyncio debug mode.

    Returns:
        tuple: ``(logger, sentry_logger)`` - the main bot logger and the
        sentry logger (WARNING level).
    """
    # d.py stuff
    dpy_logger = logging.getLogger("discord")
    dpy_logger.setLevel(logging.WARNING)
    console = logging.StreamHandler()
    console.setLevel(logging.WARNING)
    dpy_logger.addHandler(console)

    # AN stuff
    logger = logging.getLogger("an")
    an_format = logging.Formatter(
        "%(asctime)s %(levelname)s %(module)s %(funcName)s %(lineno)d: %(message)s",
        datefmt="[%d/%m/%Y %H:%M]",
    )

    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(an_format)

    if cli_flags.debug:
        os.environ["PYTHONASYNCIODEBUG"] = "1"
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    # Imported here because the data path is only valid after
    # load_basic_configuration() has run in main().
    from anbot.core.data_manager import core_data_path

    logfile_path = core_data_path() / "an.log"
    fhandler = logging.handlers.RotatingFileHandler(
        filename=str(logfile_path), encoding="utf-8", mode="a", maxBytes=10 ** 7, backupCount=5
    )
    fhandler.setFormatter(an_format)

    logger.addHandler(fhandler)
    logger.addHandler(stdout_handler)

    # Sentry stuff
    sentry_logger = logging.getLogger("an.sentry")
    sentry_logger.setLevel(logging.WARNING)

    return logger, sentry_logger


async def _get_prefix_and_token(an, indict):
    """Loads token, prefix and sentry flag from the bot DB into ``indict``.

    Again, please blame <@269933075037814786> for this.

    Args:
        an: The bot instance whose DB is queried.
        indict (dict): Mutated in place; gains the keys "token", "prefix"
            and "enable_sentry".
    """
    indict["token"] = await an.db.token()
    indict["prefix"] = await an.db.prefix()
    indict["enable_sentry"] = await an.db.enable_sentry()


def list_instances():
    """Prints the configured instance names and exits.

    Exits with status 1 if no instance config file exists, 0 otherwise.
    """
    if not config_file.exists():
        # Fixed: a blanket "red" -> "an" rename had corrupted
        # "configured" into "configuan" in these user-facing strings.
        print(
            "No instances have been configured! Configure one "
            "using `anbot-setup` before trying to run the bot!"
        )
        sys.exit(1)
    else:
        data = JsonIO(config_file)._load_json()
        text = "Configured Instances:\n\n"
        for instance_name in sorted(data.keys()):
            text += "{}\n".format(instance_name)
        print(text)
        sys.exit(0)


def main():
    """Entry point: parses CLI flags, configures the instance, and runs the bot."""
    description = "AN V3"
    cli_flags = parse_cli_flags(sys.argv[1:])
    if cli_flags.list_instances:
        list_instances()
    elif cli_flags.version:
        print(description)
        sys.exit(0)
    elif not cli_flags.instance_name and not cli_flags.no_instance:
        print("Error: No instance name was provided!")
        sys.exit(1)
    if cli_flags.no_instance:
        print(
            "\033[1m"
            "Warning: The data will be placed in a temporary folder and removed on next system reboot."
            "\033[0m"
        )
        cli_flags.instance_name = "temporary_an"
        create_temp_config()
    load_basic_configuration(cli_flags.instance_name)
    log, sentry_log = init_loggers(cli_flags)
    an = AN(cli_flags=cli_flags, description=description, pm_help=None)
    init_global_checks(an)
    init_events(an, cli_flags)
    an.add_cog(Core(an))
    loop = asyncio.get_event_loop()
    tmp_data = {}
    loop.run_until_complete(_get_prefix_and_token(an, tmp_data))
    # NOTE(review): env var is still named RED_TOKEN (upstream leftover);
    # kept as-is because deployments may rely on it.
    token = os.environ.get("RED_TOKEN", tmp_data["token"])
    if cli_flags.token:
        token = cli_flags.token
    prefix = cli_flags.prefix or tmp_data["prefix"]
    if not (token and prefix):
        if cli_flags.no_prompt is False:
            new_token = interactive_config(an, token_set=bool(token), prefix_set=bool(prefix))
            if new_token:
                token = new_token
        else:
            log.critical("Token and prefix must be set in order to login.")
            sys.exit(1)
        # Re-read the DB so prefix/sentry settings reflect interactive config.
        loop.run_until_complete(_get_prefix_and_token(an, tmp_data))
    if cli_flags.dry_run:
        loop.run_until_complete(an.http.close())
        sys.exit(0)
    if tmp_data["enable_sentry"]:
        an.enable_sentry()
    try:
        loop.run_until_complete(an.start(token, bot=True))
    except discord.LoginFailure:
        log.critical("This token doesn't seem to be valid.")
        db_token = loop.run_until_complete(an.db.token())
        if db_token and not cli_flags.no_prompt:
            print("\nDo you want to reset the token? (y/n)")
            if confirm("> "):
                loop.run_until_complete(an.db.token.set(""))
                print("Token has been reset.")
    except KeyboardInterrupt:
        log.info("Keyboard interrupt detected. Quitting...")
        loop.run_until_complete(an.logout())
        an._shutdown_mode = ExitCodes.SHUTDOWN
    except Exception as e:
        log.critical("Fatal exception", exc_info=e)
        sentry_log.critical("Fatal Exception", exc_info=e)
        loop.run_until_complete(an.logout())
    finally:
        # Cancel anything still pending before exiting.
        # NOTE(review): the gather is cancelled but never awaited, so task
        # cleanup is best-effort only - confirm this is intentional.
        pending = asyncio.Task.all_tasks(loop=an.loop)
        gathered = asyncio.gather(*pending, loop=an.loop, return_exceptions=True)
        gathered.cancel()
        try:
            loop.run_until_complete(an.rpc.close())
        except AttributeError:
            pass
        sys.exit(an._shutdown_mode.value)


if __name__ == "__main__":
    main()
PypiClean
/Charty-0.1.0.tar.gz/Charty-0.1.0/charty/example.py
# Demo script: renders several example charts to SVG files under svg/.
from charty import Column, Line, Pie

# Column chart with two data series sharing some category keys.
# NOTE(review): the first tuple's value is a string ('Subsidy Spending
# Unknown') rather than a number - presumably charty renders it as a
# label/placeholder bar; confirm against the Column implementation.
g = Column(
    600, 300,
    [
        [('aaaaaaaaaa', 'Subsidy Spending Unknown'), ('bbbbbbbbbb', 800)],
        [('aaaaaaaaaa', 230), ('bbbbbbbbbb', 260), ('cccc', 300)]
    ],
    "css/barchart.css",
    label_rotate=-45,
    y_padding=30
)
g.output("svg/bar.svg")

# Column chart keyed by year, drawing an axis label at every 2nd interval.
# NOTE(review): this is a Column chart but is written to "svg/line.svg" -
# the filename may be misleading.
h = Column(
    600, 300,
    [
        [(2000, 10), (2001, 30), (2002, 40), (2003, 50)],
        [(1990, 30), (1992, 40), (2004, 50)]
    ],
    'css/barchart.css',
    label_intervals=2,
    x_padding=15
)
h.output("svg/line.svg")

# Pie chart; note `h` is reused for a brand-new chart object here.
h = Pie(
    500, 500,
    [
        [(2000, 1230), (2001, 3230), (2002, 4000), (2003, 1250), (2004, 1000),
         (2005, 1200), (2006, 800), (2007, 100), (22, 2332), (30, 3234)]
    ],
    'css/piechart.css',
    y_padding=70,
    x_padding=70
)
h.output("svg/pie.svg")

# risk transfers: single-series column chart of currency amounts.
risk_transfers = Column(
    370, 185,
    [
        [('Outstanding Credit', 7445983679), ('Subsidy', 114141251)]
    ],
    "css/barchart.css",
    padding=10,
    currency=True
)
risk_transfers.output("svg/risk_transfers.svg")

# contracts: line chart with two year-keyed series. Both series include the
# 2005 data point, so the two line segments join visually at that year.
contracts = Line(
    515, 330,
    [
        [(2000, 12972951342), (2001, 14109441817), (2002, 15780150198),
         (2003, 20911608531), (2004, 21064466760), (2005, 15275901189)],
        [(2005, 15275901189), (2006, 15939230914), (2007, 19124684255),
         (2008, 18239197959), (2009, 16226290107)]
    ],
    'css/linechart.css',
    label_intervals=3,
    x_padding=40,
    units=True,
    currency=True
)
contracts.output("svg/contracts.svg")
PypiClean
/Electrum-Zcash-Random-Fork-3.1.3b5.tar.gz/Electrum-Zcash-Random-Fork-3.1.3b5/gui/kivy/uix/dialogs/password_dialog.py
from kivy.app import App from kivy.factory import Factory from kivy.properties import ObjectProperty from kivy.lang import Builder from decimal import Decimal from kivy.clock import Clock from electrum_zcash.util import InvalidPassword from electrum_zcash_gui.kivy.i18n import _ Builder.load_string(''' <PasswordDialog@Popup> id: popup title: 'Electrum-Zcash' message: '' BoxLayout: size_hint: 1, 1 orientation: 'vertical' Widget: size_hint: 1, 0.05 Label: font_size: '20dp' text: root.message text_size: self.width, None size: self.texture_size Widget: size_hint: 1, 0.05 Label: id: a font_size: '50dp' text: '*'*len(kb.password) + '-'*(6-len(kb.password)) size: self.texture_size Widget: size_hint: 1, 0.05 GridLayout: id: kb size_hint: 1, None height: self.minimum_height update_amount: popup.update_password password: '' on_password: popup.on_password(self.password) spacing: '2dp' cols: 3 KButton: text: '1' KButton: text: '2' KButton: text: '3' KButton: text: '4' KButton: text: '5' KButton: text: '6' KButton: text: '7' KButton: text: '8' KButton: text: '9' KButton: text: 'Clear' KButton: text: '0' KButton: text: '<' ''') class PasswordDialog(Factory.Popup): def init(self, app, wallet, message, on_success, on_failure, is_change=0): self.app = app self.wallet = wallet self.message = message self.on_success = on_success self.on_failure = on_failure self.ids.kb.password = '' self.success = False self.is_change = is_change self.pw = None self.new_password = None self.title = 'Electrum-Zcash' + (' - ' + self.wallet.basename() if self.wallet else '') def check_password(self, password): if self.is_change > 1: return True try: self.wallet.check_password(password) return True except InvalidPassword as e: return False def on_dismiss(self): if not self.success: if self.on_failure: self.on_failure() else: # keep dialog open return True else: if self.on_success: args = (self.pw, self.new_password) if self.is_change else (self.pw,) Clock.schedule_once(lambda dt: self.on_success(*args), 
0.1) def update_password(self, c): kb = self.ids.kb text = kb.password if c == '<': text = text[:-1] elif c == 'Clear': text = '' else: text += c kb.password = text def on_password(self, pw): if len(pw) == 6: if self.check_password(pw): if self.is_change == 0: self.success = True self.pw = pw self.message = _('Please wait...') self.dismiss() elif self.is_change == 1: self.pw = pw self.message = _('Enter new PIN') self.ids.kb.password = '' self.is_change = 2 elif self.is_change == 2: self.new_password = pw self.message = _('Confirm new PIN') self.ids.kb.password = '' self.is_change = 3 elif self.is_change == 3: self.success = pw == self.new_password self.dismiss() else: self.app.show_error(_('Wrong PIN')) self.ids.kb.password = ''
PypiClean
/DynEnv-2.0.tar.gz/DynEnv-2.0/README.md
# DynEnv Dynamic Simulation Environments for Reinforcement Learning This project contains two reinforcement learning environments based on 2D physics simulation via [pymunk](https://www.pymunk.org). The environments support different observation modalities and also noisy observations. The current environments are the following: - **Robot Soccer SPL League (RoboCupEnvironment):** Here, two teams of robots are competing to play soccer. - **Autonomous driving environment (DrivingEnvironment):** Here, two teams of cars try to get to their unique destinations as quickly as possible without crashing or hitting pedestrians. The teams are not competing here, but only cars on the same team are allowed to share information (to model human drivers). ## Table of contents * [Requirements](#requirements) * [Installation](#installation) * [Usage](#usage) * [Model structure](#model-structure) * [Parameters](#parameters) * [Important functions and members](#important-functions-and-members) * [So, what are the actions?](#so-what-are-the-actions) * [What is returned?](#what-is-returned) * [Coding conventions](#coding-conventions) ## Requirements - Python 3.6+ - PyMunk - OpenCV - PyGame - PyTorch (optional) ## Installation You can install simply using pip: `pip install DynEnv` Or build from source: ``` git clone https://github.com/szemenyeim/DynEnv.git cd DynEnv pip install -e . ``` ## Usage You can simply use the environments the following way: ```python from DynEnv import * myEnv = RoboCupEnvironment(nPlayers) myEnv = DrivingEnvironment(nPlayers) ret = myEnv.step(actions) ``` Or create vectorized environments by using: ```python env, env_name = make_dyn_env(env, num_envs, num_players, render, observationType, noiseType, noiseMagnitude, use_continuous_actions) ``` More complex examples including - neural networks tailored for the special output format (i.e. the number of observations can vary through time), - logging and - plotting the results. 
For the above, see the `DynEnv/examples` directory. The `main.py` file contains a full example, while if you would like to try out how the environments work by hand, `play.py` is there for you as well.

### Model structure

The most important part from the point of view of the neural network is the `DynEnv/models` directory, which exposes the following classes:

- _ICMAgent_: the top-level agent consisting of an A2C and an Intrinsic Curiosity Module (and its variant, [Rational Curiosity Module](https://github.com/rpatrik96/AttA2C))
- _InOutArranger_: helper class to rearrange observations for simple NN forwarding
- _EmbedBlock_: the embedding network used for an object
- _InputLayer_: a complex network which converts all observations into a unified feature space
- _ActorBlock_: a neural network predicting actions for a given action type
- _ActorLayer_: an ensemble of _ActorBlock_ to predict every action
- _AttentionLayer_:
- _DynEnvFeatureExtractor_: a wrapper for the input transform by _InputLayer_, collapsing the time dimension with Recurrent Temporal Attention and running an LSTM

### Parameters

Here are some of the important settings of the environments:

- **nPlayers [1-5/10]**: Number of total players in the environment (in the RoboCup env this is per team). The limit is 10 in the Driving, 5 in the RoboCup env.
- **render [bool]**: Whether to visualize the environment.
- **observationType [Full, Partial, Image]**: Image observation only supported for the RoboCup environment.
- **noiseType [Random, Realistic]**: Realistic noise: noise magnitude and false negative rate depend on distance, proximity of other objects and sighting type. False positives and misclassifications are more likely to occur in certain situations.
- **noiseMagnitude [0-5]**: Variable to control noise
- **continuousActions [bool]**: Whether the driving env actions are understood as categorical or continuous. (Driving env only)
- **allowHeadturn [bool]**: Enables head turning actions.
(RoboCup env only)

Here are some examples of different noise and observation types:

#### Random Noise

Full Observation | Partial Observation
:-------------------------:|:-------------------------:
![](/images/randNoise/game.gif) | ![](/images/randNoise/obs.gif)

Top Camera | Bottom Camera
:-------------------------:|:-------------------------:
![](/images/randNoise/top.gif) | ![](/images/randNoise/bottom.gif)

#### Realistic Noise

Full Observation | Partial Observation
:-------------------------:|:-------------------------:
![](/images/realNoise/game.gif) | ![](/images/realNoise/obs.gif)

Top Camera | Bottom Camera
:-------------------------:|:-------------------------:
![](/images/realNoise/top.gif) | ![](/images/realNoise/bottom.gif)

#### Large, Realistic Noise

Full Observation | Partial Observation
:-------------------------:|:-------------------------:
![](/images/bigNoise/game.gif) | ![](/images/bigNoise/obs.gif)

Top Camera | Bottom Camera
:-------------------------:|:-------------------------:
![](/images/bigNoise/top.gif) | ![](/images/bigNoise/bottom.gif)

#### Driving, Realistic Noise

Full Observation | Partial Observation
:-------------------------:|:-------------------------:
![](/images/drive/game.gif) | ![](/images/drive/obs.gif)

### Important functions and members

- `reset()` Resets the environment to a new game and returns initial observations.
- `setRandomSeed(seed)` Sets the environment seed, resets the environment and returns initial observations.
- `observation_space` Returns information about the observations returned by the environment. For the exact meaning please refer to the [The Observation Space](#the-observation-space) section.
- `action_space` Returns information about the actions the environment expects.
- `step(actions)` Performs one step. This consists of several simulation steps (10 for the Driving and 50 for the RoboCup environments). It returns observations for every 10 simulation steps and full state for the last step.
- `renderMode` Whether to render to a display (`'human'`) or to a memory array (`'memory'`). - `agentVisID` With this, you can visualize the observation of an agent during rendering. - `render()` Returns rendered images if the render mode is `'memory'`. Does nothing otherwise, as the rendering is done by the step function due to the multi-timestep feature. ### So, what are the actions? The environments expect an iterable object containing the actions for every player. Each player action must contain the following: #### RoboCup: - **Movement direction:** Categorical (5) - **Turn:** Categorical (3) - **Turn head:** Continuous [-6 +6] - **Kick:** Categorical (3) (this is exclusive with moving or turning) #### Driving: - **Gas/break:** Continuous [-3 +3] or Categorical (3) - **Turn:** Continuous [-3 +3] or Categorical (3) ### What is returned? Both environments return the following variables in the step function: - **Observations:** Observations for every agent. What this is exactly depends on the observationType variable. - **Rewards:** Rewards for each agent. - **Team rewards:** Shared rewards for every team. These are added to the agent reward variables, and are not returned. - **Finished:** Game over flag - **Info:** Other important data - **Full State:** The full state of the env - **episode_r**: Cumulative rewards for the episode (Returned only at the end of an episode) - **episode_p_r**: Cumulative positive-only rewards for the episode (Returned only at the end of an episode) - **episode_g**: Goals in these episode. For RoboCup this is goals per team, for the Driving env the first value is the number of cars that reached their destination without crashing, the second is the number of crashed cars. (Returned only at the end of an episode) Position information is normalized in both the observations and the full state. #### The Observation Space Due to limitations in the OpenAI gym, this part of the environment is not fully compatible. 
The `observation_space` variable is an instance of `gym.space.Space`, however, the meaning is slightly different. The main differences are: - the observation space only gives you a placeholder for each object type to be observed (as dynamic length observation spaces are not supported in OpenAI gym) - the `.sample()` method will not work without a slight modification (see example below) - following the example, you will get a valid observation format. Unfortunately, to provide an interface as close to gym as possible, we were forced to break some methods in our observation space (mainly to be able to use the `SubprocVecEnv` method from `stable-baselines`), while providing as much information about the observation space as possible. We needed to upcast the observation space to `gym.space.Space` from `gym.space.Tuple` to be able to vectorize the environments (we could have implemented a custom environment, but the goal was to avoid writing custom code to maintain a clean API for the users). This step did not result in any loss of information, but if you would like to use methods not implemented in the base class (i.e. `gym.space.Space`), you should downcast the environment. ```python env, env_name = make_dyn_env(...) # raises NotImplementedError env.sample() # downcast observation space and it works ! env.observation_space.__class__ = gym.spaces.Tuple env.sample() ``` I.e. querying the `observation_space` variable after the trick and calling `.sample()` on it will get you a fully valid observation format, it does not cover every form of observations an environment can produce. Let us elaborate on that! ##### Gym-like observation space descriptor Due to the fact that in every time step each agent can see different number of objects (such as cars in the _Driving_ environment), including 0 as a valid number for each object type (not to mention false positive sightings or misclassifications), we cannot give an observation space format which covers all possibilities. 
However, what we can do is to _assume_ that each object type is present in the observation with a single instance, thus including all the necessary information about the object space (but be aware that multiple observations from the same object type can be in the list of observations).

Here is an example of how the observation space looks for the Driving environment (we make extensive use of the `Dict` gym space, as it enables us to describe what is contained):

```python
...
# subspace for cars
car_space = Dict({
    "position": Box(-self.mean * 2, +self.mean * 2, shape=(2,)),
    "orientation": Box(-1, 1, shape=(2,)),
    "width_height": Box(-10, 10, shape=(2,)),
    "finished": MultiBinary(1)
})
...
# assemble observation space
self.observation_space = Tuple([
    self_space,
    car_space,
    obstacle_space,
    pedestrian_space,
    lane_space
])
```

##### List of observations

The observations returned are arranged as follows: `[nParallelEnvs x nTimeSteps x nAgents x nObjectType]`

Each element of the above list is a NumPy array containing all the observations by a single agent in a single timestep.

To help construct input layers, a custom class `DynEnv.models.InOutArranger` is provided with the following two functions:

- `inputs, counts = rearrange_inputs(x)`: Creates a single list of NumPy arrays. Each element of this list contains a single numpy array of all the observations for a given object type. (Warning: in some cases this might be an empty list!)
- `outputs, masks = rearrange_outputs(inputs, counts, device)`: Takes a list of Torch Tensors and the counts output by the previous function, and creates a single tensor shaped [TimeSteps x maxObjCnt x nPlayers x featureCnt] by padding the second dimension to the largest number of objects seen for every robot. The masks variable is a binary array shaped [TimeSteps x maxObjCnt x nPlayers], which is True for padded elements (this is in line with PyTorch's MultiHeadedAttention layer).
(Warning: This assumes that the featureCnt is the same for every object type.)

Here is a more comprehensive example:

```python
from DynEnv.models import *
from torch import nn

# setup environment, query all required variables
myEnv = ...
obsSpace = myEnv.observation_space
nTime = 5 if env is DynEnvType.ROBO_CUP else 1
nPlayers = ...
featuresPerObject = [flatdim(s) for s in obsSpace.spaces]
nObjectTypes = len(featuresPerObject)

# create neural network and rearrange inputs
device = <CUDA or CPU>
myNeuralNets = [nn.Linear(objFeat,128).to(device) for objFeat in featuresPerObject]
myArranger = models.InOutArranger(nObjectTypes,nPlayers,nTime)
...
# create sample action and step
actions = torch.stack([action_space.sample() for _ in range(nPlayers)])
obs, _ = myEnv.step(actions)

# summary
# rearrange inputs - forward - rearrange outputs
netInputs, counts = myArranger.rearrange_inputs(obs)
netOutputs = [myNet(torch.tensor(netInput).to(device)) for myNet,netInput in zip(myNeuralNets,netInputs)]
outputs,masks = myArranger.rearrange_outputs(netOutputs,counts,device)
```

#### RoboCup

The full state contains the following:

- Robots **[x, y, cos(angle), sin(angle), team ID, fallen or penalized]**
- Balls **[x, y, ball owned by team ID, closest robot status]**

'Team ID' is +/-1. 'Fallen or penalized' and 'closest robot status' are binary numbers. The latter is 1 for the robot closest to the ball from each team.

If the observation is full state, the robot's own position is returned in a separate list, and both axes are flipped and angles rotated 180 degrees for team -1. Moreover, in this case the ball owned flag indicates whether the ball is owned by the robot's team, or the opponent.
The partial observation contains the following for each robot: - Balls: **[x, y, radius, ball owned status, closest robot status]** - Robots (self not included): **[x, y, cos(angle), sin(angle), team, fallen or penalized]** - Goalposts: **[x, y, radius]** - Crosses: **[x, y, radius]** - Lines: **[x1, y1, x2, y2]** - Center circle: **[x, y, radius]** Ball owned status is 0 if the ball is not owned, +1 if the ball is owned by the robot's team and -1 if owned by the opposite team. In the partial sighting case, the positions and angles are returned relative to the robot's position and head angle. The image observations contain 2D images of semantic labels. The images have 4 binary channels: - 0: Ball - 1: Robot - 2: Goalpost - 3: Line #### Driving The full state contains the following: - Cars: **[x, y, cos(angle), sin(angle), width, height, finished]** - Obstacles: **[x, y, cos(angle), sin(angle), width, height]** - Pedestrians: **[x, y]** - Lanes: **[x1, y1, x2, y2, type]** Lane type is 0 for standard lanes, 1 for the middle lane and -1 for the edge of the road. If the observation is full state, the car's own position is returned in a separate list, identical to the Self entry below. The partial observation contains the following for each car: - Self: **[x, y, cos(angle), sin(angle), width, height, goal_x, goal_y, finished]** - Cars: **[x, y, cos(angle), sin(angle), width, height]** - Obstacles: **[x, y, cos(angle), sin(angle), width, height]** - Pedestrians: **[x, y]** - Lanes: **[signed distance, cos(angle), sin(angle), type]** Widths and heights are also normalized. ## Coding conventions - Functions: - lower case names, usually verbs - `__function`: private function in base class, children cannot use it - `_function`: private function, children can use it - `function`: everyone can use it - Variables: - camelCase: with **lowercase** initial - usually nouns - Classes: - CamelCase: with **uppercase** initial - usually nouns
PypiClean
/Nevow-0.14.5.tar.gz/Nevow-0.14.5/examples/pastebin/pastebin/web/pages.py
from cStringIO import StringIO
import time

from zope.interface import implements

from twisted.python import htmlizer
from twisted.web import static

from nevow import loaders
from nevow import rend
from nevow import tags
from nevow import url
from formless import annotate
from formless import iformless
from formless import webform


# Author name displayed when a pasting was submitted without one.
ANONYMOUS = 'anonymous'


##
# Text colourisers (aka syntax highlighting)
##

def _python_colouriser(text):
    """Render Python source ``text`` as syntax-highlighted HTML.

    Uses twisted.python.htmlizer; on Twisted versions too old to
    provide the expected API, returns an explanatory message instead.
    """
    out = StringIO()
    try:
        htmlizer.filter(StringIO(text), out)
    except AttributeError:
        out = StringIO("""Starting after Nevow 0.4.1 Twisted 2.0 is a required dependency. Please install it""")
    return out.getvalue()


# Registry of colourisers, keyed by language name.
_colourisers = {
    'python': _python_colouriser
    }


##
# Formless
##

class IAddPasting(annotate.TypedInterface):
    """Formless interface describing the "add a pasting" form."""

    def addPasting(
        request=annotate.Request(),
        author=annotate.String(strip=True),
        text=annotate.Text(strip=True, required=True)):
        pass
    addPasting = annotate.autocallable(addPasting)


class IEditPasting(annotate.TypedInterface):
    """Formless interface describing the "edit a pasting" form."""

    def editPasting(
        request=annotate.Request(),
        postedBy=annotate.String(immutable=1),
        author=annotate.String(strip=True),
        text=annotate.Text(strip=True, required=True)):
        pass
    editPasting = annotate.autocallable(editPasting)


##
# "Standard" renderers
##

def render_time(theTime):
    """Return a Nevow renderer that formats ``theTime`` (a struct_time)."""
    def _(context, data):
        return time.strftime('%Y-%m-%d %H:%M:%S %Z', theTime)
    return _


def render_pastingText(text):
    """Return a renderer that colourises ``text`` (falls back to <pre>)."""
    def _(context, data):
        colouriser = _colourisers.get('python')
        if colouriser:
            return tags.xml(colouriser(text))
        return tags.pre[tags.xml(text)]
    return _


def render_pasting(version):
    """Return a renderer that fills the author/time/text slots from ``version``."""
    def _(context, data):
        # NOTE: previously the 'time' slot result was bound to a local named
        # ``time``, shadowing the time module; the return values of
        # fillSlots were never used, so the assignments are dropped.
        context.fillSlots('author', version.getAuthor() or ANONYMOUS)
        context.fillSlots('time', render_time(version.getTime()))
        context.fillSlots('text', render_pastingText(version.getText()))
        return context.tag
    return _


class BasePage(rend.Page):
    """Common page behaviour: site template, static children, pasting list."""

    docFactory = loaders.htmlfile(templateDir='templates', template='site.html')

    child_css = static.File('static/css')
    child_images = static.File('static/images')

    def data_pastings(self, context, data):
        # Most recent 20 pastings for the sidebar listing.
        return self.pastebin.getListOfPastings(20)

    def render_pasting(self, context, data):
        oid, author, time = data
        context.tag.fillSlots('url', url.root.child(str(oid)))
        context.tag.fillSlots('id', oid)
        context.tag.fillSlots('author', author or ANONYMOUS)
        return context.tag

    def render_content(self, context, data):
        # Load the page-specific template into the cleared content area.
        tag = context.tag.clear()
        tag[loaders.htmlfile(templateDir='templates', template=self.contentTemplateFile)]
        return tag


class RootPage(BasePage):
    """Front page: shows the "new pasting" form and routes /<oid> URLs."""
    implements(IAddPasting)

    addSlash = True

    def __init__(self, pastebin):
        BasePage.__init__(self)
        self.pastebin = pastebin

    def locateChild(self, context, segments):
        # A numeric first segment addresses an individual pasting.
        try:
            return Pasting(self.pastebin, int(segments[0])), segments[1:]
        except ValueError:
            pass
        return BasePage.locateChild(self, context, segments)

    def render_content(self, context, data):
        tag = context.tag.clear()
        return tag[webform.renderForms()]

    def addPasting(self, request, author, text):
        """Formless callback: create the pasting, then redirect to it."""
        oid = self.pastebin.addPasting(author, text)
        request.setComponent(iformless.IRedirectAfterPost, '/'+str(oid))


class Pasting(BasePage):
    """A single pasting, optionally at a specific historical version."""
    implements(IEditPasting)

    contentTemplateFile = 'pasting.html'

    def __init__(self, pastebin, pastingOid, version=-1):
        # version == -1 means "latest".
        BasePage.__init__(self)
        self.pastebin = pastebin
        self.pastingOid = pastingOid
        self.version = version
        self.pasting = self.pastebin.getPasting(self.pastingOid)

    def locateChild(self, context, segments):
        # A numeric child segment selects a specific version of this pasting.
        # Catch ValueError only, consistent with RootPage.locateChild.
        try:
            return Pasting(self.pastebin, self.pastingOid, int(segments[0])), segments[1:]
        except ValueError:
            pass
        return BasePage.locateChild(self, context, segments)

    def data_history(self, context, data):
        return self.pasting.getHistory()

    def render_aPasting(self, context, data):
        return render_pasting(self.pasting.getVersion(self.version))

    def render_form(self, context, data):
        # Only the latest version is editable; historical views get no form.
        if self.version != -1:
            return ''
        version = self.pasting.getVersion(self.version)
        formDefaults = context.locate(iformless.IFormDefaults)
        formDefaults.setDefault('editPasting.text', version.getText())
        formDefaults.setDefault('editPasting.postedBy', version.getAuthor())
        return webform.renderForms()

    def render_version(self, context, data):
        version, author, theTime = data
        # From the latest view, versions are children; from a version view,
        # they are siblings.
        if self.version == -1:
            u = url.here.child
        else:
            u = url.here.sibling
        context.tag.fillSlots('url', u(version))
        context.tag.fillSlots('time', render_time(theTime))
        context.tag.fillSlots('author', author or ANONYMOUS)
##        context.fillSlots('link', a(href=[u(version)])[
##            render_time(theTime), ' (',author or ANONYMOUS,')'
##            ])
        return context.tag

    def editPasting(self, request, postedBy, author, text):
        """Formless callback: store the new version, then redirect back."""
        self.pastebin.updatePasting(self.pastingOid, author, text)
        request.setComponent(iformless.IRedirectAfterPost, '/%s'%self.pastingOid)


class Version(BasePage):
    """Read-only view of one historical version of a pasting."""

    contentTemplateFile = "pasting.html"

    child_ = rend.FourOhFour()

    def __init__(self, pastebin, pasting, version):
        BasePage.__init__(self)
        self.pastebin = pastebin
        self.pasting = pasting
        self.version = version

    def data_history(self, context, data):
        return self.pasting.getHistory()

    def render_aPasting(self, context, data):
        return render_pasting(self.pasting.getVersion(self.version))

    def render_version(self, context, data):
        version, author, theTime = data
        # Fall back to ANONYMOUS for missing authors, consistent with the
        # other renderers in this module.
        context.fillSlots('link', tags.a(href=[url.here.sibling(str(version))])[
            render_time(theTime), ' (',author or ANONYMOUS,')'
            ])
        return context.tag
PypiClean
/Font-Awesome-Flask-0.1.1.tar.gz/Font-Awesome-Flask-0.1.1/src/flask_font_awesome/__init__.py
import re
import sys
import urllib.request
from pathlib import Path
from typing import Optional, Union

from flask import Blueprint, Flask, Markup, current_app, url_for

__version__ = "0.1.1"

# Directory where locally-served resources (css/js/webfonts) are cached.
STATIC_FOLDER = Path(__file__).parent / "static"
CDN_URL_TEMPLATE = "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/{version}/{type}/{style}{possibly_min}.{ext}"
# Matches the version banner embedded in downloaded resource files.
# (Dots escaped so e.g. "6x2x0" is not accepted.)
VERSION_PATTERN = re.compile(r"Font Awesome (?:Free\s)?(\d+\.\d+\.\d+)")


def _remove_prefix(s: str, prefix: str) -> str:
    """Return ``s`` without ``prefix`` (str.removeprefix needs Python >= 3.9)."""
    if sys.version_info < (3, 9):
        return s[len(prefix) :] if s.startswith(prefix) else s
    return s.removeprefix(prefix)


class FontAwesome:
    """Font Awesome icons for Flask."""

    # Defaults used when load()/load_css()/load_js() get no explicit arguments.
    style = "all"
    style_choices = ("all", "regular", "solid", "brands")
    core_style = "fontawesome"
    use_min = True
    use_css = False
    version = "6.2.0"
    # Subresource Integrity hashes for the CSS files of the default version.
    css_sri_map = {
        "all": "sha512-xh6O/CkQoPOWDdYTDqeRdPCVd1SpvCA9XXcUnZS2FmJNp1coAFzvtCN9BmamE+4aHK8yyUHUSCcJHgXloTyT2A==",
        "regular": "sha512-aNH2ILn88yXgp/1dcFPt2/EkSNc03f9HBFX0rqX3Kw37+vjipi1pK3L9W08TZLhMg4Slk810sPLdJlNIjwygFw==",
        "solid": "sha512-uj2QCZdpo8PSbRGL/g5mXek6HM/APd7k/B5Hx/rkVFPNOxAQMXD+t+bG4Zv8OAdUpydZTU3UHmyjjiHv2Ww0PA==",
        "brands": "sha512-+oRH6u1nDGSm3hH8poU85YFIVTdSnS2f+texdPGrURaJh8hzmhMiZrQth6l56P4ZQmxeZzd2DqVEMqQoJ8J89A==",
        "fontawesome": "sha512-uj2QCZdpo8PSbRGL/g5mXek6HM/APd7k/B5Hx/rkVFPNOxAQMXD+t+bG4Zv8OAdUpydZTU3UHmyjjiHv2Ww0PA==",
    }
    # Subresource Integrity hashes for the JS files of the default version.
    js_sri_map = {
        "all": "sha512-naukR7I+Nk6gp7p5TMA4ycgfxaZBJ7MO5iC3Fp6ySQyKFHOGfpkSZkYVWV5R7u7cfAicxanwYQ5D1e17EfJcMA==",
        "regular": "sha512-Kcbb5bDGCQQwo67YHS9uDvRmyrNEqHLPA1Kmn0eqrritqGDp3OpkBGvHk36GNEH44MtWM1L5k3i9MSQPMkNIuA==",
        "solid": "sha512-dcTe66qF6q/NW1X64tKXnDDcaVyRowrsVQ9wX6u7KSQpYuAl5COzdMIYDg+HqAXhPpIz1LO9ilUCL4qCbHN5Ng==",
        "brands": "sha512-1e+6G7fuQ5RdPcZcRTnR3++VY2mjeh0+zFdrD580Ell/XcUw/DQLgad5XSCX+y2p/dmJwboZYBPoiNn77YAL5A==",
        "fontawesome": "sha512-j3gF1rYV2kvAKJ0Jo5CdgLgSYS7QYmBVVUjduXdoeBkc4NFV4aSRTi+Rodkiy9ht7ZYEwF+s09S43Z1Y+ujUkA==",
    }
    # Maps each free icon style to its webfont file basename.
    webfonts_map = {
        "regular": "fa-regular-400",
        "solid": "fa-solid-900",
        "brands": "fa-brands-400",
    }

    def __init__(self, app: Optional[Flask] = None) -> None:
        if app is not None:
            self.init_app(app)

    def init_app(self, app: Flask) -> None:
        """Initialize the Flask application for use with this extension instance."""
        # register extension instance with the Flask application
        if not hasattr(app, "extensions"):
            app.extensions = {}
        app.extensions["font_awesome"] = self

        # create and register blueprint for this extension instance
        blueprint = Blueprint(
            "font_awesome",
            __name__,
            static_folder=STATIC_FOLDER.name,
            static_url_path=f"/font_awesome{app.static_url_path}",
            template_folder="templates",
        )
        app.register_blueprint(blueprint)

        # register extension instance with the Jinja2 environment (for use in templates)
        app.jinja_env.globals["font_awesome"] = self

        # set default configuration values for this extension instance
        app.config.setdefault("FONT_AWESOME_SERVE_LOCAL", False)

    @staticmethod
    def _get_file(
        style: str, use_min: bool, ext: str, type: Optional[str] = None
    ) -> Path:
        """Get the file path for the given style, extension, and possibly-minified suffix."""
        possibly_min = ".min" if use_min else ""
        return (
            STATIC_FOLDER
            / (type if type is not None else ext)
            / f"{style}{possibly_min}.{ext}"
        )

    @staticmethod
    def _get_url(
        version: str,
        style: str,
        use_min: bool,
        ext: str,
        serve_local: bool,
        type: Optional[str] = None,
    ) -> str:
        """Get the URL for the given version, style, extension, and possibly-minified suffix."""
        possibly_min = ".min" if use_min else ""
        if serve_local:
            return url_for(
                "font_awesome.static", filename=f"{ext}/{style}{possibly_min}.{ext}"
            )
        return CDN_URL_TEMPLATE.format(
            version=version,
            type=type if type is not None else ext,
            style=style,
            possibly_min=possibly_min,
            ext=ext,
        )

    @staticmethod
    def _get_version(file: Path) -> Optional[str]:
        """Get the version from the given file (None if no version banner found)."""
        match = VERSION_PATTERN.search(file.read_text())
        return match.group(1) if match is not None else None

    @classmethod
    def _request_file(
        cls,
        version: str,
        style: str,
        use_min: bool,
        ext: str,
        file: Path,
        type: Optional[str] = None,
    ) -> None:
        """Download the resource from the CDN into ``file`` for serving locally."""
        file.parent.mkdir(parents=True, exist_ok=True)
        with urllib.request.urlopen(
            cls._get_url(version, style, use_min, ext, False, type)
        ) as response:
            file.write_bytes(response.read())

    @classmethod
    def _request_webfont_files(
        cls,
        version: str,
        webfont_style: str,
    ) -> None:
        """Request the webfont files (ttf and woff2) for serving locally."""
        _type = "webfonts"
        for ext in ("ttf", "woff2"):
            file = cls._get_file(webfont_style, False, ext, _type)
            cls._request_file(version, webfont_style, False, ext, file, _type)

    @classmethod
    def _possibly_request_file(
        cls, version: str, style: str, use_min: bool, ext: str
    ) -> None:
        """Download the resource only if it is missing or a different version."""
        file = cls._get_file(style, use_min, ext)
        if not file.exists() or cls._get_version(file) != version:
            cls._request_file(version, style, use_min, ext, file)
            if ext == "css":
                # also request webfonts (CSS references them at runtime)
                if style == "all":
                    for _style in cls.style_choices[1:]:
                        webfont_style = cls.webfonts_map[_style]
                        cls._request_webfont_files(version, webfont_style)
                else:
                    webfont_style = cls.webfonts_map[style]
                    cls._request_webfont_files(version, webfont_style)

    def load(
        self,
        version: str = version,
        style: str = style,
        css_sri: str = css_sri_map[style],
        core_css_sri: str = css_sri_map[core_style],
        js_sri: str = js_sri_map[style],
        core_js_sri: str = js_sri_map[core_style],
        use_min: bool = use_min,
        use_css: bool = use_css,
    ) -> Markup:
        """Load Font Awesome's `WebFonts + CSS
        <https://fontawesome.com/docs/web/setup/host-yourself/webfonts>`_ /
        `SVG + JS <https://fontawesome.com/docs/web/setup/host-yourself/svg-js>`_
        resources for the given version. Defaults to `SVG + JS`.

        Some examples:

        >>> font_awesome.load()
        >>> font_awesome.load(style="solid", use_css=True)
        >>> font_awesome.load(
        ...     version="5.9.0",
        ...     js_sri="sha512-q3eWabyZPc1XTCmF+8/LuE1ozpg5xxn7iO89yfSOd5/oKvyqLngoNGsx8jq92Y8eXJ/IRxQbEC+FGSYxtk2oiw=="
        ... )

        Args:
            version (str): The version to load. Defaults to `6.2.0`.
            style (str): The `icon style(s) <https://fontawesome.com/v6/docs/web/dig-deeper/styles>`_
                to load. Defaults to `all`.
            css_sri (str): The `Subresource Integrity (SRI)
                <https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity>`_
                for the CSS resource file when not served locally.
                Defaults to the version 6.2.0 value from ``css_sri_map``.
            core_css_sri (str): The SRI for the core CSS resource file when not
                served locally. Defaults to the version 6.2.0 value from ``css_sri_map``.
            js_sri (str): The SRI for the JS resource file when not served locally.
                Defaults to the version 6.2.0 value from ``js_sri_map``.
            core_js_sri (str): The SRI for the core JS resource file when not
                served locally. Defaults to the version 6.2.0 value from ``js_sri_map``.
            use_min (bool): Whether to use the minified resource or not. Defaults to `True`.
            use_css (bool): Whether to use `WebFonts + CSS
                <https://fontawesome.com/docs/web/setup/host-yourself/webfonts>`_ over
                `SVG + JS <https://fontawesome.com/docs/web/setup/host-yourself/svg-js>`_.
                Defaults to `False`.

        Raises:
            ValueError: When trying to load a non-free icon style (i.e. not one of
                `all`, `regular`, `solid`, or `brands`)

        Returns:
            flask.Markup: The HTML markup for the WebFonts + CSS / SVG + JS resource(s).
        """
        # BUGFIX: load_css/load_js take (version, style, ...) positionally;
        # the arguments were previously passed as (style, version, ...),
        # which made every default load() call fail the style check.
        if use_css:
            return self.load_css(version, style, css_sri, core_css_sri, use_min)
        return self.load_js(version, style, js_sri, core_js_sri, use_min)

    def load_css(
        self,
        version: str = version,
        style: str = style,
        sri: str = css_sri_map[style],
        core_sri: str = css_sri_map[core_style],
        use_min: bool = use_min,
    ) -> Markup:
        """Load Font Awesome's `WebFonts + CSS
        <https://fontawesome.com/docs/web/setup/host-yourself/webfonts>`_
        resources for the given version.

        Some examples:

        >>> font_awesome.load_css()
        >>> font_awesome.load_css(style="regular")

        Args:
            version (str): The version to load. Defaults to `6.2.0`.
            style (str): The `icon style(s) <https://fontawesome.com/v6/docs/web/dig-deeper/styles>`_
                to load. Defaults to `all`.
            sri (str): The `Subresource Integrity (SRI)
                <https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity>`_
                for the CSS resource file when not served locally.
                Defaults to the version 6.2.0 value from ``css_sri_map``.
            core_sri (str): The SRI for the core CSS resource file when not served
                locally. Defaults to the version 6.2.0 value from ``css_sri_map``.
            use_min (bool): Whether to use the minified resources or not. Defaults to `True`.

        Raises:
            ValueError: When trying to load a non-free icon style (i.e. not one of
                `all`, `regular`, `solid`, or `brands`)

        Returns:
            flask.Markup: The HTML markup for the WebFonts + CSS resources.
        """
        if style not in self.style_choices:
            raise ValueError(f"`style` must be one of {', '.join(self.style_choices)}")

        serve_local = current_app.config["FONT_AWESOME_SERVE_LOCAL"]
        ext = "css"
        url = self._get_url(version, style, use_min, ext, serve_local)
        if serve_local:
            self._possibly_request_file(version, style, use_min, ext)
            css = f'<link rel="stylesheet" href="{url}" />'
        else:
            css = f'<link rel="stylesheet" href="{url}" integrity="{sri}" crossorigin="anonymous" />'

        # Individual styles additionally need the core stylesheet.
        if style != "all":
            core_url = self._get_url(
                version, self.core_style, use_min, ext, serve_local
            )
            if serve_local:
                self._possibly_request_file(version, self.core_style, use_min, ext)
                css += f'\n<link rel="stylesheet" href="{core_url}" />'
            else:
                css += f'\n<link rel="stylesheet" href="{core_url}" integrity="{core_sri}" crossorigin="anonymous" />'

        return Markup(css)

    def load_js(
        self,
        version: str = version,
        style: str = style,
        sri: str = js_sri_map[style],
        core_sri: str = js_sri_map[core_style],
        use_min: bool = use_min,
    ) -> Markup:
        """Load Font Awesome's `SVG + JS
        <https://fontawesome.com/docs/web/setup/host-yourself/svg-js>`_
        resource for the given version.

        Some examples:

        >>> font_awesome.load_js()
        >>> font_awesome.load_js(
        ...     use_min=False,
        ...     sri="sha512-8XtSBQOB+R4dpcpQBpYC5Q7ti7y/MjIF0l/1ZiSd4xM04Dr052S/Py981Pzmwo2HrXCR2JhYE1MYR15aZGMnig=="
        ... )

        Args:
            version (str): The version to load. Defaults to `6.2.0`.
            style (str): The `icon style(s) <https://fontawesome.com/v6/docs/web/dig-deeper/styles>`_
                to load. Defaults to `all`.
            sri (str): The `SRI
                <https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity>`_
                for the JS resource file when not served locally.
                Defaults to the version 6.2.0 value from ``js_sri_map``.
            core_sri (str): The SRI for the core JS resource file when not served
                locally. Defaults to the version 6.2.0 value from ``js_sri_map``.
            use_min (bool): Whether to use the minified resource or not. Defaults to `True`.

        Raises:
            ValueError: When trying to load a non-free icon style (i.e. not one of
                `all`, `regular`, `solid`, or `brands`)

        Returns:
            flask.Markup: The HTML markup for the SVG + JS resource.
        """
        if style not in self.style_choices:
            raise ValueError(f"`style` must be one of {', '.join(self.style_choices)}")

        serve_local = current_app.config["FONT_AWESOME_SERVE_LOCAL"]
        ext = "js"
        url = self._get_url(version, style, use_min, ext, serve_local)
        if serve_local:
            self._possibly_request_file(version, style, use_min, ext)
            js = f'<script defer src="{url}"></script>'
        else:
            js = f'<script defer src="{url}" integrity="{sri}" crossorigin="anonymous"></script>'

        # Individual styles additionally need the core script.
        if style != "all":
            core_url = self._get_url(
                version, self.core_style, use_min, ext, serve_local
            )
            if serve_local:
                self._possibly_request_file(version, self.core_style, use_min, ext)
                js += f'\n<script defer src="{core_url}"></script>'
            else:
                js += f'\n<script defer src="{core_url}" integrity="{core_sri}" crossorigin="anonymous"></script>'

        return Markup(js)

    def render_icon(
        self,
        name: str,
        inverse: bool = False,
        size: Optional[str] = None,
        fixed_with: bool = False,
        rotation: Optional[Union[str, int]] = None,
        animation: Optional[str] = None,
        border: bool = False,
        pull: Optional[str] = None,
        swap_opacity: bool = False,
        aria_hidden: bool = True,
        style: Optional[str] = None,
        _stack_size: Optional[str] = None,
    ) -> Markup:
        """Render an icon. See the `Font Awesome documentation
        <https://fontawesome.com/search?o=r&m=free>`_ for the complete list of
        available icons.

        Some examples:

        >>> font_awesome.render_icon('fas fa-house')
        >>> font_awesome.render_icon('fa-regular fa-square', size='xl')
        >>> font_awesome.render_icon('fab fa-github', inverse=True, rotation=90)

        Args:
            name (str): The name of the icon (e.g. `fa-solid fa-user`).
            inverse (bool): Inverts the color of the icon to white. Defaults to `False`.
            size (Optional[str]): The `relative or literal size
                <https://fontawesome.com/v6/docs/web/style/size>`_ of the icon.
                Defaults to `None`.
            fixed_with (bool): Set the icon to a `fixed width
                <https://fontawesome.com/v6/docs/web/style/fixed-width>`_ for easy
                vertical alignment. Defaults to `False`.
                (Name kept as-is — a typo for "fixed_width" — for backward
                compatibility with existing callers.)
            rotation (Optional[Union[str, int]]): `Rotate or flip
                <https://fontawesome.com/v6/docs/web/style/rotate>`_ the icon.
                Defaults to `None`.
            animation (Optional[str]): `Animate
                <https://fontawesome.com/v6/docs/web/style/animate>`_ the icon.
                Defaults to `None`.
            border (bool): Add a `border
                <https://fontawesome.com/v6/docs/web/style/pull>`_ to the icon.
                Defaults to `False`.
            pull (Optional[str]): `Pull
                <https://fontawesome.com/v6/docs/web/style/pull>`_ the icon left or
                right. Defaults to `None`.
            swap_opacity (bool): Swap the default opacity of each layer in a
                `duotone <https://fontawesome.com/v6/docs/web/style/duotone>`_ icon.
                Defaults to `False`.
            aria_hidden (bool): Add the `aria-hidden` attribute to the icon.
                Defaults to `True`.
            style (Optional[str]): Customize the icon even further using `CSS styling
                <https://fontawesome.com/v6/docs/web/style/custom>`_. Defaults to `None`.
            _stack_size (Optional[str]): Internal; set by render_stacked_icon.

        Returns:
            flask.Markup: The HTML markup for the icon.
        """
        icon = f'<i class="{name}'
        if _stack_size:
            icon += f" fa-stack-{_remove_prefix(_stack_size, 'fa-stack-')}"
        if inverse:
            icon += " fa-inverse"
        if size is not None:
            icon += f" fa-{_remove_prefix(size, 'fa-')}"
        if fixed_with:
            icon += " fa-fw"
        if rotation is not None:
            # Integer rotations become e.g. "rotate-90".
            if isinstance(rotation, int):
                rotation = f"rotate-{rotation}"
            icon += f" fa-{_remove_prefix(rotation, 'fa-')}"
        if animation is not None:
            icon += f" fa-{_remove_prefix(animation, 'fa-')}"
        if border:
            icon += " fa-border"
        if pull is not None:
            icon += f" fa-pull-{_remove_prefix(pull, 'fa-pull-')}"
        if swap_opacity:
            icon += " fa-swap-opacity"
        icon += '"'
        if style is not None:
            icon += f' style="{style}"'
        if aria_hidden:
            icon += ' aria-hidden="true"'
        icon += "></i>"
        return Markup(icon)

    def render_stacked_icon(
        self,
        name_1: str,
        name_2: str,
        stack_size_1: str = "2x",
        stack_size_2: str = "1x",
        inverse: bool = False,
        size: Optional[str] = None,
        aria_hidden: bool = True,
        style: Optional[str] = None,
        style_1: Optional[str] = None,
        style_2: Optional[str] = None,
    ) -> Markup:
        """Render a `stacked <https://fontawesome.com/v6/docs/web/style/stack>`_ icon.

        Some examples:

        >>> font_awesome.render_stacked_icon(
        ...     "fa-solid fa-square",
        ...     "fab fa-twitter",
        ...     inverse=True,
        ...     size="2x",
        ... )
        >>> font_awesome.render_stacked_icon(
        ...     "fa-solid fa-camera",
        ...     "fa-solid fa-ban",
        ...     stack_size_1="1x",
        ...     stack_size_2="2x",
        ...     size="2x",
        ...     style_2="color:Tomato",
        ... )

        Args:
            name_1 (str): The name of the first icon (e.g. `fa-solid fa-square`).
            name_2 (str): The name of the second icon (e.g. `fab fa-twitter`).
            stack_size_1 (str): The relative size of the first icon. One of `1x` or
                `2x`. Defaults to `2x`.
            stack_size_2 (str): The relative size of the second icon. One of `1x` or
                `2x`. Defaults to `1x`.
            inverse (bool): Inverts the color of the icon to white. Defaults to `False`.
            size (Optional[str]): The `relative or literal size
                <https://fontawesome.com/v6/docs/web/style/size>`_ of the stacked icon.
                Defaults to `None`.
            aria_hidden (bool): Add the `aria-hidden` attribute to the icon.
                Defaults to `True`.
            style (Optional[str]): Customize the stacked icon even further using
                `CSS styling <https://fontawesome.com/v6/docs/web/style/custom>`_.
                Defaults to `None`.
            style_1 (Optional[str]): Customize the first icon even further using
                `CSS styling <https://fontawesome.com/v6/docs/web/style/custom>`_.
                Defaults to `None`.
            style_2 (Optional[str]): Customize the second icon even further using
                `CSS styling <https://fontawesome.com/v6/docs/web/style/custom>`_.
                Defaults to `None`.

        Returns:
            flask.Markup: The HTML markup for the icon.
        """
        span = '<span class="fa-stack'
        if size is not None:
            span += f" fa-{_remove_prefix(size, 'fa-')}"
        span += '"'
        if style is not None:
            span += f' style="{style}"'
        if aria_hidden:
            span += ' aria-hidden="true"'
        span += ">"
        # "fa-inverse" is only meaningful on the smaller (1x) layer.
        span += f"\n {self.render_icon(name_1, inverse if stack_size_1 == '1x' else False, aria_hidden=False, style=style_1, _stack_size=stack_size_1)}"
        span += f"\n {self.render_icon(name_2, inverse if stack_size_2 == '1x' else False, aria_hidden=False, style=style_2, _stack_size=stack_size_2)}"
        span += "\n</span>"
        return Markup(span)
PypiClean
/BuildStream-2.0.1-cp39-cp39-manylinux_2_28_x86_64.whl/buildstream/plugins/sources/tar.py
import os
import tarfile
from contextlib import contextmanager
from tempfile import TemporaryFile

from buildstream import DownloadableFileSource, SourceError
from buildstream import utils


class ReadableTarInfo(tarfile.TarInfo):
    """
    The goal is to override `TarFile`'s `extractall` semantics by ensuring that on extraction, the
    files are readable by the owner of the file. This is done by overriding the accessor for the
    `mode` attribute in `TarInfo`, the class that encapsulates the internal meta-data of the tarball,
    so that the owner-read bit is always set.
    """

    # https://github.com/python/mypy/issues/4125
    @property  # type: ignore
    def mode(self):
        # Respect umask instead of the file mode stored in the archive.
        # The only bit used from the embedded mode is the executable bit for files.
        #
        # `self.__permission` is name-mangled to `_ReadableTarInfo__permission`,
        # so it cannot collide with attributes of the base TarInfo class.
        umask = utils.get_umask()
        if self.isdir() or bool(self.__permission & 0o100):
            # Directories and owner-executable files: rwx for all, filtered
            # through the process umask.
            return 0o777 & ~umask
        else:
            # Plain files: rw for all, filtered through the umask.
            return 0o666 & ~umask

    @mode.setter
    def mode(self, permission):
        # Stash the mode recorded in the archive; the getter above decides
        # what mode extraction will actually use.
        self.__permission = permission  # pylint: disable=attribute-defined-outside-init


class TarSource(DownloadableFileSource):
    # pylint: disable=attribute-defined-outside-init

    # Minimum BuildStream core version this plugin requires.
    BST_MIN_VERSION = "2.0"

    def configure(self, node):
        # `base-dir` is a glob pattern selecting the directory inside the
        # tarball to stage (default "*", i.e. a single top-level directory).
        super().configure(node)

        self.base_dir = node.get_str("base-dir", "*")
        node.validate_keys(DownloadableFileSource.COMMON_CONFIG_KEYS + ["base-dir"])

    def preflight(self):
        # Lzip-compressed tarballs are decompressed via the host `lzip`
        # tool, so only require it when the URL calls for it.
        self.host_lzip = None
        if self.url.endswith(".lz"):
            self.host_lzip = utils.get_host_tool("lzip")

    def get_unique_key(self):
        # The staged output depends on which base directory is selected,
        # so `base_dir` must participate in the cache key.
        return super().get_unique_key() + [self.base_dir]

    @contextmanager
    def _run_lzip(self):
        # Decompress the downloaded .lz file into an anonymous temporary
        # file and yield it rewound to the start, ready for tarfile.
        assert self.host_lzip
        with TemporaryFile() as lzip_stdout:
            with open(self._get_mirror_file(), "r") as lzip_file:
                self.call([self.host_lzip, "-d"], stdin=lzip_file, stdout=lzip_stdout)
            lzip_stdout.seek(0, 0)
            yield lzip_stdout

    @contextmanager
    def _get_tar(self):
        # Yield an open TarFile for the downloaded archive, transparently
        # handling lzip compression. ReadableTarInfo gives extracted files
        # sane permissions regardless of what the archive recorded.
        if self.url.endswith(".lz"):
            with self._run_lzip() as lzip_dec:
                with tarfile.open(fileobj=lzip_dec, mode="r:", tarinfo=ReadableTarInfo) as tar:
                    yield tar
        else:
            with tarfile.open(self._get_mirror_file(), tarinfo=ReadableTarInfo) as tar:
                yield tar

    def stage(self, directory):
        # Extract the archive into `directory`, optionally re-rooted at the
        # configured base directory, always skipping device nodes.
        try:
            with self._get_tar() as tar:
                base_dir = None
                if self.base_dir:
                    base_dir = self._find_base_dir(tar, self.base_dir)

                def filter_non_dev(tarfiles):
                    # Device nodes cannot (and should not) be staged.
                    for file in tarfiles:
                        if not file.isdev():
                            yield file

                if base_dir:
                    tar.extractall(
                        path=directory, members=filter_non_dev(self._extract_members(tar, base_dir, directory))
                    )
                else:
                    tar.extractall(path=directory, members=filter_non_dev(tar.getmembers()))

        except (tarfile.TarError, OSError) as e:
            raise SourceError("{}: Error staging source: {}".format(self, e)) from e

    # Override and translate which filenames to extract
    def _extract_members(self, tar, base_dir, target_dir):
        # Assert that a tarfile is safe to extract; specifically, make
        # sure that we don't do anything outside of the target
        # directory (this is possible, if, say, someone engineered a
        # tarfile to contain paths that start with ..).
        def assert_safe(member):
            # NOTE(review): startswith() is a plain prefix check, so a
            # sibling path such as "<target_dir>-extra" would also pass —
            # confirm callers guarantee no such sibling can exist.
            final_path = os.path.abspath(os.path.join(target_dir, member.path))
            if not final_path.startswith(target_dir):
                raise SourceError(
                    "{}: Tarfile attempts to extract outside the staging area: "
                    "{} -> {}".format(self, member.path, final_path)
                )

            if member.islnk():
                linked_path = os.path.abspath(os.path.join(target_dir, member.linkname))
                if not linked_path.startswith(target_dir):
                    raise SourceError(
                        "{}: Tarfile attempts to hardlink outside the staging area: "
                        "{} -> {}".format(self, member.path, final_path)
                    )

            # Don't need to worry about symlinks because they're just
            # files here and won't be able to do much harm once we are
            # in a sandbox.

        if not base_dir.endswith(os.sep):
            base_dir = base_dir + os.sep

        L = len(base_dir)
        for member in tar.getmembers():

            # First, ensure that a member never starts with `./`
            if member.path.startswith("./"):
                member.path = member.path[2:]
            if member.islnk() and member.linkname.startswith("./"):
                member.linkname = member.linkname[2:]

            # Now extract only the paths which match the normalized path
            if member.path.startswith(base_dir):

                # Hardlinks are smart and collapse into the "original"
                # when their counterpart doesn't exist. This means we
                # only need to modify links to files whose location we
                # change.
                #
                # Since we assert that we're not linking to anything
                # outside the target directory, this should only ever
                # be able to link to things inside the target
                # directory, so we should cover all bases doing this.
                #
                if member.islnk() and member.linkname.startswith(base_dir):
                    member.linkname = member.linkname[L:]

                # Strip the base-dir prefix so the archive is re-rooted
                # at base_dir on extraction.
                member.path = member.path[L:]
                assert_safe(member)
                yield member

    # We want to iterate over all paths of a tarball, but getmembers()
    # is not enough because some tarballs simply do not contain the leading
    # directory paths for the archived files.
    def _list_tar_paths(self, tar):

        visited = set()
        for member in tar.getmembers():

            # Remove any possible leading './', offer more consistent behavior
            # across tarballs encoded with or without a leading '.'
            #
            # NOTE(review): lstrip("./") strips *characters*, so a top-level
            # name like "..hidden" would also lose its leading dots — confirm
            # such names cannot legitimately appear here.
            member_name = member.name.lstrip("./")

            if not member.isdir():

                # Loop over the components of a path, for a path of a/b/c/d
                # we will first visit 'a', then 'a/b' and then 'a/b/c', excluding
                # the final component
                components = member_name.split("/")
                for i in range(len(components) - 1):
                    dir_component = "/".join([components[j] for j in range(i + 1)])
                    if dir_component not in visited:
                        visited.add(dir_component)
                        try:
                            # Dont yield directory members which actually do
                            # exist in the archive
                            _ = tar.getmember(dir_component)
                        except KeyError:
                            if dir_component != ".":
                                yield dir_component

                continue

            # Avoid considering the '.' directory, if any is included in the archive
            # this is to avoid the default 'base-dir: *' value behaving differently
            # depending on whether the tarball was encoded with a leading '.' or not
            if member_name == ".":
                continue

            yield member_name

    def _find_base_dir(self, tar, pattern):
        # Glob the (synthesised) directory listing of the archive against
        # the configured pattern; the first match in sorted order wins.
        paths = self._list_tar_paths(tar)
        matches = sorted(list(utils.glob(paths, pattern)))
        if not matches:
            raise SourceError("{}: Could not find base directory matching pattern: {}".format(self, pattern))

        return matches[0]


def setup():
    # Plugin entry point used by BuildStream to obtain the Source class.
    return TarSource
PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/xmpp/UserService.js
define(["dijit","dojo","dojox"],function(_1,_2,_3){ _2.provide("dojox.xmpp.UserService"); _2.declare("dojox.xmpp.UserService",null,{constructor:function(_4){ this.session=_4; },getPersonalProfile:function(){ var _5={id:this.session.getNextIqId(),type:"get"}; var _6=new _3.string.Builder(_3.xmpp.util.createElement("iq",_5,false)); _6.append(_3.xmpp.util.createElement("query",{xmlns:"jabber:iq:private"},false)); _6.append(_3.xmpp.util.createElement("sunmsgr",{xmlsns:"sun:xmpp:properties"},true)); _6.append("</query></iq>"); var _7=this.session.dispatchPacket(_6.toString(),"iq",_5.id); _7.addCallback(this,"_onGetPersonalProfile"); },setPersonalProfile:function(_8){ var _9={id:this.session.getNextIqId(),type:"set"}; var _a=new _3.string.Builder(_3.xmpp.util.createElement("iq",_9,false)); _a.append(_3.xmpp.util.createElement("query",{xmlns:"jabber:iq:private"},false)); _a.append(_3.xmpp.util.createElement("sunmsgr",{xmlsns:"sun:xmpp:properties"},false)); for(var _b in _8){ _a.append(_3.xmpp.util.createElement("property",{name:_b},false)); _a.append(_3.xmpp.util.createElement("value",{},false)); _a.append(_8[_b]); _a.append("</value></props>"); } _a.append("</sunmsgr></query></iq>"); var _c=this.session.dispatchPacket(_a.toString(),"iq",_9.id); _c.addCallback(this,"_onSetPersonalProfile"); },_onSetPersonalProfile:function(_d){ if(_d.getAttribute("type")=="result"){ this.onSetPersonalProfile(_d.getAttribute("id")); }else{ if(_d.getAttribute("type")=="error"){ var _e=this.session.processXmppError(_d); this.onSetPersonalProfileFailure(_e); } } },onSetPersonalProfile:function(id){ },onSetPersonalProfileFailure:function(_f){ },_onGetPersonalProfile:function(_10){ if(_10.getAttribute("type")=="result"){ var _11={}; if(_10.hasChildNodes()){ var _12=_10.firstChild; if((_12.nodeName=="query")&&(_12.getAttribute("xmlns")=="jabber:iq:private")){ var _13=_12.firstChild; if((_13.nodeName=="query")&&(_13.getAttributes("xmlns")=="sun:xmpp:properties")){ for(var 
i=0;i<_13.childNodes.length;i++){ var n=_13.childNodes[i]; if(n.nodeName=="property"){ var _14=n.getAttribute("name"); var val=n.firstChild||""; _11[_14]=val; } } } } this.onGetPersonalProfile(_11); } }else{ if(_10.getAttribute("type")=="error"){ var err=this.session.processXmppError(_10); this.onGetPersonalProfileFailure(err); } } return _10; },onGetPersonalProfile:function(_15){ },onGetPersonalProfileFailure:function(err){ }}); });
PypiClean
/Comet-3.1.0.tar.gz/Comet-3.1.0/comet/protocol/subscriber.py
# Twisted protocol definition
from twisted.internet import reactor
from twisted.protocols.policies import TimeoutMixin
from twisted.internet.protocol import ReconnectingClientFactory

# Base protocol definitions
from comet.protocol.base import EventHandler, VOEVENT_ROLES

# Constructors for transport protocol messages
from comet.protocol.messages import iamaliveresponse, authenticateresponse

# Comet utility routines
import comet.log as log
from comet.utility import xml_document, ParseError

__all__ = ["VOEventSubscriberFactory"]


class VOEventSubscriber(EventHandler, TimeoutMixin):
    """Protocol speaking the subscriber side of the VOEvent transport.

    Replies to "iamalive" and "authenticate" transport packets from the
    upstream broker, hands received VOEvents to EventHandler.process_event(),
    and drops the connection if the peer is silent for ALIVE_INTERVAL seconds.
    """

    ALIVE_INTERVAL = 120  # If we get no traffic for ALIVE_INTERVAL seconds,
                          # assume our peer forgot us.

    def __init__(self, filters=None):
        # Default of None (instead of the original mutable default `[]`)
        # so that instances never share one list object.
        self.filters = filters if filters is not None else []

    def connectionMade(self, *args):
        # Start the inactivity timer as soon as the connection is up.
        self.setTimeout(self.ALIVE_INTERVAL)
        return EventHandler.connectionMade(self, *args)

    def connectionLost(self, *args):
        # Don't leave the reactor in an unclean state when we exit.
        self.setTimeout(None)
        return EventHandler.connectionLost(self, *args)

    def timeoutConnection(self):
        # Invoked by TimeoutMixin when no traffic has been seen for
        # ALIVE_INTERVAL seconds; drop the connection.
        # (Also fixes the "disconecting" typo in the log message.)
        log.info(
            "No iamalive received from %s for %d seconds; disconnecting" %
            (self.transport.getPeer(), self.ALIVE_INTERVAL),
            system="VOEventSubscriber"
        )
        return TimeoutMixin.timeoutConnection(self)

    def stringReceived(self, data):
        """
        Called when a complete new message is received.

        Dispatches on the transport packet's "role" attribute: answers
        keep-alives and authentication requests, and forwards VOEvents to
        the event-processing machinery.
        """
        try:
            incoming = xml_document(data)
        except ParseError:
            log.warn("Unparsable message received")
            return

        # Reset the timeout counter and wait another 120 seconds before
        # disconnecting due to inactivity.
        self.resetTimeout()

        # The root element of both VOEvent and Transport packets has a
        # "role" element which we use to identify the type of message we
        # have received.
        if incoming.element.get('role') == "iamalive":
            log.debug("IAmAlive received from %s" % str(self.transport.getPeer()))
            self.send_xml(
                iamaliveresponse(self.factory.local_ivo,
                                 incoming.element.find('Origin').text)
            )
        elif incoming.element.get('role') == "authenticate":
            log.debug("Authenticate received from %s" % str(self.transport.getPeer()))
            self.send_xml(
                authenticateresponse(
                    self.factory.local_ivo,
                    incoming.element.find('Origin').text,
                    self.filters
                )
            )
        elif incoming.element.get('role') in VOEVENT_ROLES:
            log.info(
                "VOEvent %s received from %s" % (
                    incoming.element.attrib['ivorn'],
                    str(self.transport.getPeer())
                )
            )
            # We don't send a NAK even if the event is invalid since we don't
            # want to be removed from upstream's distribution list.
            self.process_event(incoming, can_nak=False)
        else:
            log.warn(
                "Incomprehensible data received from %s (role=%s)" %
                (self.transport.getPeer(), incoming.element.get("role"))
            )


class VOEventSubscriberFactory(ReconnectingClientFactory):
    """Factory for VOEventSubscriber with exponential reconnection backoff.

    The backoff delay is reset once a connection has stayed up for at
    least RESET_DELAY seconds.
    """

    RESET_DELAY = 5  # Reset exponential backoff after connection survives for
                     # at least RESET_DELAY seconds

    protocol = VOEventSubscriber
    callLater = reactor.callLater  # Can be replaced in test cases

    def __init__(self,
                 local_ivo=None,
                 validators=None,
                 handlers=None,
                 filters=None,
                 ):
        self.local_ivo = local_ivo
        self.handlers = handlers or []
        self.validators = validators or []
        self.filters = filters or []

        # Calling resetDelay() now is not necessary, but we want
        # self.reset_call always to exist when we use it later
        self.reset_call = self.callLater(0, self.resetDelay)

    def buildProtocol(self, addr):
        # Schedule a backoff reset for when this connection has proved
        # itself stable (cancelled again if the connection is lost first).
        self.reset_call = self.callLater(self.RESET_DELAY, self.resetDelay)
        p = self.protocol(self.filters)
        p.factory = self
        return p

    def stopFactory(self):
        # Cancel a pending backoff reset so we don't leave a dangling
        # delayed call in the reactor.
        if self.reset_call.active():
            self.reset_call.cancel()

    def clientConnectionFailed(self, connector, reason):
        log.info(
            "Connection to %s failed; will retry in %d second%s" %
            (connector.getDestination(), self.delay,
             "" if self.delay == 1 else "s"),
            system="VOEventSubscriberFactory"
        )
        ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)

    def clientConnectionLost(self, connector, reason):
        log.info(
            "Connection to %s lost; will retry in %d second%s" %
            (connector.getDestination(), self.delay,
             "" if self.delay == 1 else "s"),
            system="VOEventSubscriberFactory"
        )
        if self.reset_call.active():
            self.reset_call.cancel()
        # FIX: delegate to the base class's clientConnectionLost — the
        # original called clientConnectionFailed here, so Twisted's
        # reconnection bookkeeping handled the wrong event.
        ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
PypiClean
/FastSent-0.2.0.tar.gz/FastSent-0.2.0/README.rst
FastSent is a sentiment classification Python library. It uses a sequential model for sentiment classification. FastSent is developed using a GRU (Gated Recurrent Unit) model.
PypiClean
/GhTrack-1.1.1.tar.gz/GhTrack-1.1.1/docs/introduction.rst
Introduction ============ github-track is a Python library to use the `Github API v3 <http://developer.github.com/v3>`__. With it, you can pull any public repositories pull requests from Python scripts. **Sending email currently work only with sendGrid** Download and install -------------------- First of all make sure you have install python in your machine and the version is higher than `3.6`. If not please process as follow to install it. .. code-block:: bash >> brew install python@3.9 **Installation using pip** The easiest way to install is to use [Python Package Index](https://pypi.org/project/GhTrack/), so, a pip install should be enough. .. code-block:: bash >> pip3 install GhTrack **Installation by cloning the source code** If you have done the installation using pip, you can ignore this part. To use it please clone the [github-track](https://github.com/zinaLacina/github-track) repository. .. code-block:: bash >> git clone https://github.com/zinaLacina/github-track Once it clone please cd into the directory .. code-block:: bash >> cd github-track Once inside the direction check that you have the latest up to date of the setuptools. .. code-block:: bash python3 -m pip install --upgrade setuptools And lastly install the *module* .. code-block:: bash python3 setup.py install And you are all set for to run the application. Short tutorial --------------------- Let's test the base features of the module, that consist to pull the last 7 days pull requests of a public repo. By default the module has default value in the settings located in the data folder. The default repo is ``kubernetes``. So to get the list of the last 7 days pull requests of the ``kubernetes`` repo. Open a terminal, and in the console please type >>``python3`` After that, import the ``GhTrack`` module .. 
code-block:: python >> from GhTrack import GhTrack # create GhTrack object without any params(first of all the default params) >> g = GhTrack() #That will print on the console the html of the last 7 days pull requests >> g.sendEmailOrPrintConsole(emailNotConsole=False) You can also get the json format of the last 7 days pull requests .. code-block:: python >> from GhTrack import GhTrack >> g = GhTrack() >> pulls = g.getPulls() #json format >> pulls Then play with your Github objects:: for pull in pulls: print(pull["title"]) Licensing --------- This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <https://unlicense.org>
PypiClean
/FutureLog-0.1.1.tar.gz/FutureLog-0.1.1/README.md
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) # Installation ```pip install futurelog``` # Usage ## Introduction The goal of this library is to provide a way to defer logs and consume (print) them when needed, in an async application. For instance, it would perfectly fit a config deployer in async. It would help to keep messages grouped by servers. Usage should be limited to reporting and not error/exception logging. Also you should ensure you catch all possible exception in your program in your entrypoint, in order to consume all logs before exiting your application. ## Create a logger ```python from futurelog import FutureLogger future_logger = FutureLogger(__name__) ``` ## Register logs The methods supported are: `.debug()`, `.info()`, `.warning()`, `.error()`, `.critical()` ```python future_logger.debug(topic, msg) ``` Example: ```python future_logger.debug("server1", "deploying stuff 1") future_logger.error("server1", "failed") future_logger.debug("server2", "deploying stuff 1") future_logger.warning("server2", "success") ``` ## Consume logs ### One specific logger ```python logger.consume(topic) ``` Example: ```python future_logger.consume("server1") future_logger.consume("server2") ``` ### All loggers for a topic (one for each module) ```python FutureLogger.consume_all_logger_for(topic) ``` ```python FutureLogger.consume_all_logger_for("server1") FutureLogger.consume_all_logger_for("server2") ``` ### All unconsumed logger ```python FutureLogger.consume_all_logger() ```
PypiClean