content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """Tests that the seccomp filters don't let blacklisted syscalls through.""" import os from subprocess import run import pytest import host_tools.cargo_build as host # pylint:disable=import-error def test_seccomp_ls(tmp_basic_jailer): """Assert that the seccomp filters deny a blacklisted syscall.""" # pylint: disable=redefined-outer-name # The fixture pattern causes a pylint false positive for that rule. # Path to the `ls` binary, which attempts to execute `SYS_access`, # blacklisted for Firecracker. ls_command_path = '/bin/ls' demo_jailer = tmp_basic_jailer assert os.path.exists(demo_jailer) # Compile the mini jailer. outcome = run([demo_jailer, ls_command_path]) # The seccomp filters should send SIGSYS (31) to the binary. `ls` doesn't # handle it, so it will exit with error. assert outcome.returncode != 0 def test_advanced_seccomp_harmless(tmp_advanced_seccomp_binaries): """ Test `demo_harmless_firecracker`. Test that the built demo jailer allows the built demo harmless firecracker. """ # pylint: disable=redefined-outer-name # The fixture pattern causes a pylint false positive for that rule. demo_advanced_jailer, demo_harmless_firecracker, _ =\ tmp_advanced_seccomp_binaries assert os.path.exists(demo_advanced_jailer) assert os.path.exists(demo_harmless_firecracker) outcome = run([demo_advanced_jailer, demo_harmless_firecracker]) # The demo harmless firecracker should have terminated gracefully. assert outcome.returncode == 0 def test_advanced_seccomp_malicious(tmp_advanced_seccomp_binaries): """ Test `demo_malicious_firecracker`. Test that the built demo jailer denies the built demo malicious firecracker. """ # pylint: disable=redefined-outer-name # The fixture pattern causes a pylint false positive for that rule. 
demo_advanced_jailer, _, demo_malicious_firecracker =\ tmp_advanced_seccomp_binaries assert os.path.exists(demo_advanced_jailer) assert os.path.exists(demo_malicious_firecracker) outcome = run([demo_advanced_jailer, demo_malicious_firecracker]) # The demo malicious firecracker should have received `SIGSYS`. assert outcome.returncode != 0
[ 2, 15069, 2864, 6186, 13, 785, 11, 3457, 13, 393, 663, 29116, 13, 1439, 6923, 33876, 13, 198, 2, 30628, 55, 12, 34156, 12, 33234, 7483, 25, 24843, 12, 17, 13, 15, 198, 37811, 51, 3558, 326, 262, 384, 535, 3361, 16628, 836, 470, ...
2.79883
855
aa = a(1) aa.go()
[ 198, 7252, 796, 257, 7, 16, 8, 198, 7252, 13, 2188, 3419 ]
1.5
12
# This file was automatically generated by SWIG (http://www.swig.org). # Version 2.0.12 # # Do not make changes to this file unless you know what you are doing--modify # the SWIG interface file instead. """ IDA Plugin SDK API wrapper: hexrays """ from sys import version_info if version_info >= (2,6,0): _ida_hexrays = swig_import_helper() del swig_import_helper else: import _ida_hexrays del version_info try: _swig_property = property except NameError: pass # Python < 2.2 doesn't have 'property'. try: _object = object _newclass = 1 except AttributeError: _newclass = 0 try: import weakref weakref_proxy = weakref.proxy except: weakref_proxy = lambda x: x import ida_idaapi import sys _BC695 = sys.modules["__main__"].IDAPYTHON_COMPAT_695_API if _BC695: import ida_pro import ida_xref import ida_typeinf import ida_idp def _kludge_use_TPopupMenu(*args): """ _kludge_use_TPopupMenu(m) """ return _ida_hexrays._kludge_use_TPopupMenu(*args) array_of_bitsets_swigregister = _ida_hexrays.array_of_bitsets_swigregister array_of_bitsets_swigregister(array_of_bitsets) mopvec_t_swigregister = _ida_hexrays.mopvec_t_swigregister mopvec_t_swigregister(mopvec_t) mcallargs_t_swigregister = _ida_hexrays.mcallargs_t_swigregister mcallargs_t_swigregister(mcallargs_t) block_chains_vec_t_swigregister = _ida_hexrays.block_chains_vec_t_swigregister block_chains_vec_t_swigregister(block_chains_vec_t) user_numforms_t_swigregister = _ida_hexrays.user_numforms_t_swigregister user_numforms_t_swigregister(user_numforms_t) lvar_mapping_t_swigregister = _ida_hexrays.lvar_mapping_t_swigregister lvar_mapping_t_swigregister(lvar_mapping_t) hexwarns_t_swigregister = _ida_hexrays.hexwarns_t_swigregister hexwarns_t_swigregister(hexwarns_t) ctree_items_t_swigregister = _ida_hexrays.ctree_items_t_swigregister ctree_items_t_swigregister(ctree_items_t) user_labels_t_swigregister = _ida_hexrays.user_labels_t_swigregister user_labels_t_swigregister(user_labels_t) user_cmts_t_swigregister = 
_ida_hexrays.user_cmts_t_swigregister user_cmts_t_swigregister(user_cmts_t) user_iflags_t_swigregister = _ida_hexrays.user_iflags_t_swigregister user_iflags_t_swigregister(user_iflags_t) user_unions_t_swigregister = _ida_hexrays.user_unions_t_swigregister user_unions_t_swigregister(user_unions_t) cinsnptrvec_t_swigregister = _ida_hexrays.cinsnptrvec_t_swigregister cinsnptrvec_t_swigregister(cinsnptrvec_t) eamap_t_swigregister = _ida_hexrays.eamap_t_swigregister eamap_t_swigregister(eamap_t) boundaries_t_swigregister = _ida_hexrays.boundaries_t_swigregister boundaries_t_swigregister(boundaries_t) def user_iflags_second(*args): """ user_iflags_second(p) -> int32 const & Get reference to the current map value. @param p (C++: user_iflags_iterator_t) """ return _ida_hexrays.user_iflags_second(*args) cfuncptr_t_swigregister = _ida_hexrays.cfuncptr_t_swigregister cfuncptr_t_swigregister(cfuncptr_t) qvector_history_t_swigregister = _ida_hexrays.qvector_history_t_swigregister qvector_history_t_swigregister(qvector_history_t) history_t_swigregister = _ida_hexrays.history_t_swigregister history_t_swigregister(history_t) qlist_cinsn_t_iterator_swigregister = _ida_hexrays.qlist_cinsn_t_iterator_swigregister qlist_cinsn_t_iterator_swigregister(qlist_cinsn_t_iterator) qvector_lvar_t_swigregister = _ida_hexrays.qvector_lvar_t_swigregister qvector_lvar_t_swigregister(qvector_lvar_t) qlist_cinsn_t_swigregister = _ida_hexrays.qlist_cinsn_t_swigregister qlist_cinsn_t_swigregister(qlist_cinsn_t) qvector_carg_t_swigregister = _ida_hexrays.qvector_carg_t_swigregister qvector_carg_t_swigregister(qvector_carg_t) qvector_ccase_t_swigregister = _ida_hexrays.qvector_ccase_t_swigregister qvector_ccase_t_swigregister(qvector_ccase_t) lvar_saved_infos_t_swigregister = _ida_hexrays.lvar_saved_infos_t_swigregister lvar_saved_infos_t_swigregister(lvar_saved_infos_t) ui_stroff_ops_t_swigregister = _ida_hexrays.ui_stroff_ops_t_swigregister ui_stroff_ops_t_swigregister(ui_stroff_ops_t) def 
qswap(*args): """ qswap(a, b) """ return _ida_hexrays.qswap(*args) fnum_array_swigregister = _ida_hexrays.fnum_array_swigregister fnum_array_swigregister(fnum_array) def debug_hexrays_ctree(*args): """ debug_hexrays_ctree(msg) """ return _ida_hexrays.debug_hexrays_ctree(*args) def init_hexrays_plugin(*args): """ init_hexrays_plugin(flags=0) -> bool Initialize your plugin for hex-rays decompiler. This function must be called before calling any other decompiler function. It initializes the pointer to the dispatcher. @param flags: reserved, must be 0 (C++: int) @return: true if the decompiler exists and the dispatcher pointer is ready to use. """ return _ida_hexrays.init_hexrays_plugin(*args) def get_widget_vdui(*args): """ get_widget_vdui(f) -> vdui_t Get the 'vdui_t' instance associated to the TWidget @param f: pointer to window (C++: TWidget *) @return: a vdui_t *, or NULL """ return _ida_hexrays.get_widget_vdui(*args) def boundaries_find(*args): """ boundaries_find(map, key) -> boundaries_iterator_t Find the specified key in boundaries_t. @param map (C++: const boundaries_t *) @param key (C++: const cinsn_t *&) """ return _ida_hexrays.boundaries_find(*args) def boundaries_insert(*args): """ boundaries_insert(map, key, val) -> boundaries_iterator_t Insert new ( 'cinsn_t' *, 'rangeset_t' ) pair into boundaries_t. @param map (C++: boundaries_t *) @param key (C++: const cinsn_t *&) @param val (C++: const rangeset_t &) """ return _ida_hexrays.boundaries_insert(*args) def term_hexrays_plugin(*args): """ term_hexrays_plugin() Stop working with hex-rays decompiler. 
""" return _ida_hexrays.term_hexrays_plugin(*args) Hexrays_Hooks_swigregister = _ida_hexrays.Hexrays_Hooks_swigregister Hexrays_Hooks_swigregister(Hexrays_Hooks) uval_ivl_t_swigregister = _ida_hexrays.uval_ivl_t_swigregister uval_ivl_t_swigregister(uval_ivl_t) uval_ivl_ivlset_t_swigregister = _ida_hexrays.uval_ivl_ivlset_t_swigregister uval_ivl_ivlset_t_swigregister(uval_ivl_ivlset_t) array_of_ivlsets_swigregister = _ida_hexrays.array_of_ivlsets_swigregister array_of_ivlsets_swigregister(array_of_ivlsets) MAX_SUPPORTED_STACK_SIZE = _ida_hexrays.MAX_SUPPORTED_STACK_SIZE def hexrays_alloc(*args): """ hexrays_alloc(size) -> void * """ return _ida_hexrays.hexrays_alloc(*args) def hexrays_free(*args): """ hexrays_free(ptr) """ return _ida_hexrays.hexrays_free(*args) MAX_VLR_SIZE = _ida_hexrays.MAX_VLR_SIZE CMP_NZ = _ida_hexrays.CMP_NZ CMP_Z = _ida_hexrays.CMP_Z CMP_AE = _ida_hexrays.CMP_AE CMP_B = _ida_hexrays.CMP_B CMP_A = _ida_hexrays.CMP_A CMP_BE = _ida_hexrays.CMP_BE CMP_GT = _ida_hexrays.CMP_GT CMP_GE = _ida_hexrays.CMP_GE CMP_LT = _ida_hexrays.CMP_LT CMP_LE = _ida_hexrays.CMP_LE valrng_t_swigregister = _ida_hexrays.valrng_t_swigregister valrng_t_swigregister(valrng_t) cvar = _ida_hexrays.cvar MAX_VALUE = cvar.MAX_VALUE MAX_SVALUE = cvar.MAX_SVALUE MIN_SVALUE = cvar.MIN_SVALUE NO_ACCESS = _ida_hexrays.NO_ACCESS WRITE_ACCESS = _ida_hexrays.WRITE_ACCESS READ_ACCESS = _ida_hexrays.READ_ACCESS RW_ACCESS = _ida_hexrays.RW_ACCESS def is_may_access(*args): """ is_may_access(maymust) -> bool """ return _ida_hexrays.is_may_access(*args) MERR_OK = _ida_hexrays.MERR_OK MERR_BLOCK = _ida_hexrays.MERR_BLOCK MERR_INTERR = _ida_hexrays.MERR_INTERR MERR_INSN = _ida_hexrays.MERR_INSN MERR_MEM = _ida_hexrays.MERR_MEM MERR_BADBLK = _ida_hexrays.MERR_BADBLK MERR_BADSP = _ida_hexrays.MERR_BADSP MERR_PROLOG = _ida_hexrays.MERR_PROLOG MERR_SWITCH = _ida_hexrays.MERR_SWITCH MERR_EXCEPTION = _ida_hexrays.MERR_EXCEPTION MERR_HUGESTACK = _ida_hexrays.MERR_HUGESTACK MERR_LVARS = 
_ida_hexrays.MERR_LVARS MERR_BITNESS = _ida_hexrays.MERR_BITNESS MERR_BADCALL = _ida_hexrays.MERR_BADCALL MERR_BADFRAME = _ida_hexrays.MERR_BADFRAME MERR_UNKTYPE = _ida_hexrays.MERR_UNKTYPE MERR_BADIDB = _ida_hexrays.MERR_BADIDB MERR_SIZEOF = _ida_hexrays.MERR_SIZEOF MERR_REDO = _ida_hexrays.MERR_REDO MERR_CANCELED = _ida_hexrays.MERR_CANCELED MERR_RECDEPTH = _ida_hexrays.MERR_RECDEPTH MERR_OVERLAP = _ida_hexrays.MERR_OVERLAP MERR_PARTINIT = _ida_hexrays.MERR_PARTINIT MERR_COMPLEX = _ida_hexrays.MERR_COMPLEX MERR_LICENSE = _ida_hexrays.MERR_LICENSE MERR_ONLY32 = _ida_hexrays.MERR_ONLY32 MERR_ONLY64 = _ida_hexrays.MERR_ONLY64 MERR_BUSY = _ida_hexrays.MERR_BUSY MERR_FARPTR = _ida_hexrays.MERR_FARPTR MERR_EXTERN = _ida_hexrays.MERR_EXTERN MERR_FUNCSIZE = _ida_hexrays.MERR_FUNCSIZE MERR_BADRANGES = _ida_hexrays.MERR_BADRANGES MERR_STOP = _ida_hexrays.MERR_STOP MERR_MAX_ERR = _ida_hexrays.MERR_MAX_ERR MERR_LOOP = _ida_hexrays.MERR_LOOP def get_merror_desc(*args): """ get_merror_desc(code, mba) -> ea_t Get textual description of an error code @param code: Microcode error codes (C++: merror_t) @param mba: the microcode array (C++: mbl_array_t *) @return: the error address """ return _ida_hexrays.get_merror_desc(*args) def reg2mreg(*args): """ reg2mreg(reg) -> mreg_t Map a processor register to microregister. @param reg: processor register number (C++: int) @return: microregister register id or mr_none """ return _ida_hexrays.reg2mreg(*args) def mreg2reg(*args): """ mreg2reg(reg, width) -> int Map a microregister to processor register. 
@param reg: microregister number (C++: mreg_t) @param width: size of microregister in bytes (C++: int) @return: processor register id or -1 """ return _ida_hexrays.mreg2reg(*args) optinsn_t_swigregister = _ida_hexrays.optinsn_t_swigregister optinsn_t_swigregister(optinsn_t) MUST_ACCESS = cvar.MUST_ACCESS MAY_ACCESS = cvar.MAY_ACCESS MAYMUST_ACCESS_MASK = cvar.MAYMUST_ACCESS_MASK ONE_ACCESS_TYPE = cvar.ONE_ACCESS_TYPE INCLUDE_SPOILED_REGS = cvar.INCLUDE_SPOILED_REGS EXCLUDE_PASS_REGS = cvar.EXCLUDE_PASS_REGS FULL_XDSU = cvar.FULL_XDSU WITH_ASSERTS = cvar.WITH_ASSERTS EXCLUDE_VOLATILE = cvar.EXCLUDE_VOLATILE INCLUDE_UNUSED_SRC = cvar.INCLUDE_UNUSED_SRC INCLUDE_DEAD_RETREGS = cvar.INCLUDE_DEAD_RETREGS INCLUDE_RESTRICTED = cvar.INCLUDE_RESTRICTED CALL_SPOILS_ONLY_ARGS = cvar.CALL_SPOILS_ONLY_ARGS optblock_t_swigregister = _ida_hexrays.optblock_t_swigregister optblock_t_swigregister(optblock_t) m_nop = _ida_hexrays.m_nop m_stx = _ida_hexrays.m_stx m_ldx = _ida_hexrays.m_ldx m_ldc = _ida_hexrays.m_ldc m_mov = _ida_hexrays.m_mov m_neg = _ida_hexrays.m_neg m_lnot = _ida_hexrays.m_lnot m_bnot = _ida_hexrays.m_bnot m_xds = _ida_hexrays.m_xds m_xdu = _ida_hexrays.m_xdu m_low = _ida_hexrays.m_low m_high = _ida_hexrays.m_high m_add = _ida_hexrays.m_add m_sub = _ida_hexrays.m_sub m_mul = _ida_hexrays.m_mul m_udiv = _ida_hexrays.m_udiv m_sdiv = _ida_hexrays.m_sdiv m_umod = _ida_hexrays.m_umod m_smod = _ida_hexrays.m_smod m_or = _ida_hexrays.m_or m_and = _ida_hexrays.m_and m_xor = _ida_hexrays.m_xor m_shl = _ida_hexrays.m_shl m_shr = _ida_hexrays.m_shr m_sar = _ida_hexrays.m_sar m_cfadd = _ida_hexrays.m_cfadd m_ofadd = _ida_hexrays.m_ofadd m_cfshl = _ida_hexrays.m_cfshl m_cfshr = _ida_hexrays.m_cfshr m_sets = _ida_hexrays.m_sets m_seto = _ida_hexrays.m_seto m_setp = _ida_hexrays.m_setp m_setnz = _ida_hexrays.m_setnz m_setz = _ida_hexrays.m_setz m_setae = _ida_hexrays.m_setae m_setb = _ida_hexrays.m_setb m_seta = _ida_hexrays.m_seta m_setbe = _ida_hexrays.m_setbe m_setg = 
_ida_hexrays.m_setg m_setge = _ida_hexrays.m_setge m_setl = _ida_hexrays.m_setl m_setle = _ida_hexrays.m_setle m_jcnd = _ida_hexrays.m_jcnd m_jnz = _ida_hexrays.m_jnz m_jz = _ida_hexrays.m_jz m_jae = _ida_hexrays.m_jae m_jb = _ida_hexrays.m_jb m_ja = _ida_hexrays.m_ja m_jbe = _ida_hexrays.m_jbe m_jg = _ida_hexrays.m_jg m_jge = _ida_hexrays.m_jge m_jl = _ida_hexrays.m_jl m_jle = _ida_hexrays.m_jle m_jtbl = _ida_hexrays.m_jtbl m_ijmp = _ida_hexrays.m_ijmp m_goto = _ida_hexrays.m_goto m_call = _ida_hexrays.m_call m_icall = _ida_hexrays.m_icall m_ret = _ida_hexrays.m_ret m_push = _ida_hexrays.m_push m_pop = _ida_hexrays.m_pop m_und = _ida_hexrays.m_und m_ext = _ida_hexrays.m_ext m_f2i = _ida_hexrays.m_f2i m_f2u = _ida_hexrays.m_f2u m_i2f = _ida_hexrays.m_i2f m_u2f = _ida_hexrays.m_u2f m_f2f = _ida_hexrays.m_f2f m_fneg = _ida_hexrays.m_fneg m_fadd = _ida_hexrays.m_fadd m_fsub = _ida_hexrays.m_fsub m_fmul = _ida_hexrays.m_fmul m_fdiv = _ida_hexrays.m_fdiv def must_mcode_close_block(*args): """ must_mcode_close_block(mcode, including_calls) -> bool Must an instruction with the given opcode be the last one in a block? Such opcodes are called closing opcodes. @param mcode: instruction opcode (C++: mcode_t) @param including_calls: should m_call/m_icall be considered as the closing opcodes? If this function returns true, the opcode cannot appear in the middle of a block. Calls are a special case because before MMAT_CALLS they are closing opcodes. Afteer MMAT_CALLS that are not considered as closing opcodes. (C++: bool) """ return _ida_hexrays.must_mcode_close_block(*args) def is_mcode_propagatable(*args): """ is_mcode_propagatable(mcode) -> bool May opcode be propagated? Such opcodes can be used in sub-instructions (nested instructions) There is a handful of non-propagatable opcodes, like jumps, ret, nop, etc All other regular opcodes are propagatable and may appear in a nested instruction. 
@param mcode (C++: mcode_t) """ return _ida_hexrays.is_mcode_propagatable(*args) def is_mcode_addsub(*args): """ is_mcode_addsub(mcode) -> bool """ return _ida_hexrays.is_mcode_addsub(*args) def is_mcode_xdsu(*args): """ is_mcode_xdsu(mcode) -> bool """ return _ida_hexrays.is_mcode_xdsu(*args) def is_mcode_set(*args): """ is_mcode_set(mcode) -> bool """ return _ida_hexrays.is_mcode_set(*args) def is_mcode_set1(*args): """ is_mcode_set1(mcode) -> bool """ return _ida_hexrays.is_mcode_set1(*args) def is_mcode_j1(*args): """ is_mcode_j1(mcode) -> bool """ return _ida_hexrays.is_mcode_j1(*args) def is_mcode_jcond(*args): """ is_mcode_jcond(mcode) -> bool """ return _ida_hexrays.is_mcode_jcond(*args) def is_mcode_convertible_to_jmp(*args): """ is_mcode_convertible_to_jmp(mcode) -> bool """ return _ida_hexrays.is_mcode_convertible_to_jmp(*args) def is_mcode_convertible_to_set(*args): """ is_mcode_convertible_to_set(mcode) -> bool """ return _ida_hexrays.is_mcode_convertible_to_set(*args) def is_mcode_call(*args): """ is_mcode_call(mcode) -> bool """ return _ida_hexrays.is_mcode_call(*args) def is_mcode_fpu(*args): """ is_mcode_fpu(mcode) -> bool """ return _ida_hexrays.is_mcode_fpu(*args) def is_mcode_commutative(*args): """ is_mcode_commutative(mcode) -> bool """ return _ida_hexrays.is_mcode_commutative(*args) def is_mcode_shift(*args): """ is_mcode_shift(mcode) -> bool """ return _ida_hexrays.is_mcode_shift(*args) def is_mcode_divmod(*args): """ is_mcode_divmod(op) -> bool """ return _ida_hexrays.is_mcode_divmod(*args) def set2jcnd(*args): """ set2jcnd(code) -> mcode_t """ return _ida_hexrays.set2jcnd(*args) def jcnd2set(*args): """ jcnd2set(code) -> mcode_t """ return _ida_hexrays.jcnd2set(*args) def negate_mcode_relation(*args): """ negate_mcode_relation(code) -> mcode_t """ return _ida_hexrays.negate_mcode_relation(*args) def swap_mcode_relation(*args): """ swap_mcode_relation(code) -> mcode_t """ return _ida_hexrays.swap_mcode_relation(*args) def 
get_signed_mcode(*args): """ get_signed_mcode(code) -> mcode_t """ return _ida_hexrays.get_signed_mcode(*args) def get_unsigned_mcode(*args): """ get_unsigned_mcode(code) -> mcode_t """ return _ida_hexrays.get_unsigned_mcode(*args) def is_signed_mcode(*args): """ is_signed_mcode(code) -> bool """ return _ida_hexrays.is_signed_mcode(*args) def is_unsigned_mcode(*args): """ is_unsigned_mcode(code) -> bool """ return _ida_hexrays.is_unsigned_mcode(*args) def mcode_modifies_d(*args): """ mcode_modifies_d(mcode) -> bool """ return _ida_hexrays.mcode_modifies_d(*args) operand_locator_t_swigregister = _ida_hexrays.operand_locator_t_swigregister operand_locator_t_swigregister(operand_locator_t) mr_none = cvar.mr_none mr_cf = cvar.mr_cf mr_zf = cvar.mr_zf mr_sf = cvar.mr_sf mr_of = cvar.mr_of mr_pf = cvar.mr_pf cc_count = cvar.cc_count mr_cc = cvar.mr_cc mr_first = cvar.mr_first number_format_t_swigregister = _ida_hexrays.number_format_t_swigregister number_format_t_swigregister(number_format_t) NF_FIXED = _ida_hexrays.NF_FIXED """ number format has been defined by the user """ NF_NEGDONE = _ida_hexrays.NF_NEGDONE """ temporary internal bit: negation has been performed """ NF_BINVDONE = _ida_hexrays.NF_BINVDONE """ temporary internal bit: inverting bits is done """ NF_NEGATE = _ida_hexrays.NF_NEGATE """ The user asked to negate the constant. """ NF_BITNOT = _ida_hexrays.NF_BITNOT """ The user asked to invert bits of the constant. """ NF_STROFF = _ida_hexrays.NF_STROFF """ internal bit: used as stroff, valid iff 'is_stroff()' """ vd_printer_t_swigregister = _ida_hexrays.vd_printer_t_swigregister vd_printer_t_swigregister(vd_printer_t) vc_printer_t_swigregister = _ida_hexrays.vc_printer_t_swigregister vc_printer_t_swigregister(vc_printer_t) qstring_printer_t_swigregister = _ida_hexrays.qstring_printer_t_swigregister qstring_printer_t_swigregister(qstring_printer_t) def dstr(*args): """ dstr(tif) -> char const * Print the specified type info. 
This function can be used from a debugger by typing "tif->dstr()" @param tif (C++: const tinfo_t *) """ return _ida_hexrays.dstr(*args) def is_type_correct(*args): """ is_type_correct(ptr) -> bool Verify a type string. @param ptr (C++: const type_t *) @return: true if type string is correct """ return _ida_hexrays.is_type_correct(*args) def is_small_udt(*args): """ is_small_udt(tif) -> bool Is a small structure or union? @param tif (C++: const tinfo_t &) @return: true if the type is a small UDT (user defined type). Small UDTs fit into a register (or pair or registers) as a rule. """ return _ida_hexrays.is_small_udt(*args) def is_nonbool_type(*args): """ is_nonbool_type(type) -> bool Is definitely a non-boolean type? @param type (C++: const tinfo_t &) @return: true if the type is a non-boolean type (non bool and well defined) """ return _ida_hexrays.is_nonbool_type(*args) def is_bool_type(*args): """ is_bool_type(type) -> bool Is a boolean type? @param type (C++: const tinfo_t &) @return: true if the type is a boolean type """ return _ida_hexrays.is_bool_type(*args) def is_ptr_or_array(*args): """ is_ptr_or_array(t) -> bool Is a pointer or array type? @param t (C++: type_t) """ return _ida_hexrays.is_ptr_or_array(*args) def is_paf(*args): """ is_paf(t) -> bool Is a pointer, array, or function type? @param t (C++: type_t) """ return _ida_hexrays.is_paf(*args) def is_inplace_def(*args): """ is_inplace_def(type) -> bool Is struct/union/enum definition (not declaration)? @param type (C++: const tinfo_t &) """ return _ida_hexrays.is_inplace_def(*args) def partial_type_num(*args): """ partial_type_num(type) -> int Calculate number of partial subtypes. @param type (C++: const tinfo_t &) @return: number of partial subtypes. The bigger is this number, the uglier is the type. 
""" return _ida_hexrays.partial_type_num(*args) def get_float_type(*args): """ get_float_type(width) -> tinfo_t Get a type of a floating point value with the specified width @param width: width of the desired type (C++: int) @return: type info object """ return _ida_hexrays.get_float_type(*args) def get_int_type_by_width_and_sign(*args): """ get_int_type_by_width_and_sign(srcwidth, sign) -> tinfo_t Create a type info by width and sign. Returns a simple type (examples: int, short) with the given width and sign. @param srcwidth: size of the type in bytes (C++: int) @param sign: sign of the type (C++: type_sign_t) """ return _ida_hexrays.get_int_type_by_width_and_sign(*args) def get_unk_type(*args): """ get_unk_type(size) -> tinfo_t Create a partial type info by width. Returns a partially defined type (examples: _DWORD, _BYTE) with the given width. @param size: size of the type in bytes (C++: int) """ return _ida_hexrays.get_unk_type(*args) def dummy_ptrtype(*args): """ dummy_ptrtype(ptrsize, isfp) -> tinfo_t Generate a dummy pointer type @param ptrsize: size of pointed object (C++: int) @param isfp: is floating point object? (C++: bool) """ return _ida_hexrays.dummy_ptrtype(*args) def get_member_type(*args): """ get_member_type(mptr, type) -> bool Get type of a structure field. This function performs validity checks of the field type. Wrong types are rejected. @param mptr: structure field (C++: const member_t *) @param type: pointer to the variable where the type is returned. This parameter can be NULL. (C++: tinfo_t *) @return: false if failed """ return _ida_hexrays.get_member_type(*args) def make_pointer(*args): """ make_pointer(type) -> tinfo_t Create a pointer type. This function performs the following conversion: "type" -> "type*" @param type: object type. (C++: const tinfo_t &) @return: "type*". 
for example, if 'char' is passed as the argument, """ return _ida_hexrays.make_pointer(*args) def create_typedef(*args): """ create_typedef(name) -> tinfo_t create_typedef(n) -> tinfo_t Create a reference to a named type. @param name: type name (C++: const char *) @return: type which refers to the specified name. For example, if name is "DWORD", the type info which refers to "DWORD" is created. """ return _ida_hexrays.create_typedef(*args) GUESSED_NONE = _ida_hexrays.GUESSED_NONE GUESSED_WEAK = _ida_hexrays.GUESSED_WEAK GUESSED_FUNC = _ida_hexrays.GUESSED_FUNC GUESSED_DATA = _ida_hexrays.GUESSED_DATA TS_NOELL = _ida_hexrays.TS_NOELL TS_SHRINK = _ida_hexrays.TS_SHRINK TS_DONTREF = _ida_hexrays.TS_DONTREF TS_MASK = _ida_hexrays.TS_MASK def get_type(*args): """ get_type(id, tif, guess) -> bool Get a global type. Global types are types of addressable objects and struct/union/enum types @param id: address or id of the object (C++: uval_t) @param tif: buffer for the answer (C++: tinfo_t *) @param guess: what kind of types to consider (C++: type_source_t) @return: success """ return _ida_hexrays.get_type(*args) def set_type(*args): """ set_type(id, tif, source, force=False) -> bool Set a global type. @param id: address or id of the object (C++: uval_t) @param tif: new type info (C++: const tinfo_t &) @param source: where the type comes from (C++: type_source_t) @param force: true means to set the type as is, false means to merge the new type with the possibly existing old type info. (C++: bool) @return: success """ return _ida_hexrays.set_type(*args) vdloc_t_swigregister = _ida_hexrays.vdloc_t_swigregister vdloc_t_swigregister(vdloc_t) def print_vdloc(*args): """ print_vdloc(loc, nbytes) Print vdloc. Since vdloc does not always carry the size info, we pass it as NBYTES.. @param loc (C++: const vdloc_t &) @param nbytes (C++: int) """ return _ida_hexrays.print_vdloc(*args) def arglocs_overlap(*args): """ arglocs_overlap(loc1, w1, loc2, w2) -> bool Do two arglocs overlap? 
    @param loc1 (C++: const vdloc_t &)
    @param w1 (C++: size_t)
    @param loc2 (C++: const vdloc_t &)
    @param w2 (C++: size_t)
    """
    return _ida_hexrays.arglocs_overlap(*args)

# NOTE(review): everything in this module is SWIG-generated binding glue for
# the Hex-Rays decompiler SDK. The *_swigregister calls attach the generated
# Python proxy classes to their C++ counterparts, and the bare assignments
# re-export constants from the _ida_hexrays extension module (or its cvar
# namespace). Do not hand-edit logic here; it is overwritten on regeneration.
lvar_locator_t_swigregister = _ida_hexrays.lvar_locator_t_swigregister
lvar_locator_t_swigregister(lvar_locator_t)
lvar_t_swigregister = _ida_hexrays.lvar_t_swigregister
lvar_t_swigregister(lvar_t)
# SVW_*: local variable width classification flavors.
SVW_INT = _ida_hexrays.SVW_INT
SVW_FLOAT = _ida_hexrays.SVW_FLOAT
SVW_SOFT = _ida_hexrays.SVW_SOFT
lvars_t_swigregister = _ida_hexrays.lvars_t_swigregister
lvars_t_swigregister(lvars_t)
lvar_saved_info_t_swigregister = _ida_hexrays.lvar_saved_info_t_swigregister
lvar_saved_info_t_swigregister(lvar_saved_info_t)
# LVINF_*: flags of lvar_saved_info_t.
LVINF_KEEP = _ida_hexrays.LVINF_KEEP
"""
preserve saved user settings regardless of vars

for example, if a var loses all its user-defined attributes or even gets
destroyed, keep its 'lvar_saved_info_t' . this is used for ephemeral
variables that get destroyed by macro recognition.
"""
LVINF_FORCE = _ida_hexrays.LVINF_FORCE
"""
force allocation of a new variable. forces the decompiler to create a new
variable at ll.defea
"""
LVINF_NOPTR = _ida_hexrays.LVINF_NOPTR
"""
variable type should not be a pointer
"""
lvar_uservec_t_swigregister = _ida_hexrays.lvar_uservec_t_swigregister
lvar_uservec_t_swigregister(lvar_uservec_t)
ULV_PRECISE_DEFEA = _ida_hexrays.ULV_PRECISE_DEFEA
"""
Use precise defea's for lvar locations.
"""

def restore_user_lvar_settings(*args):
    """
    restore_user_lvar_settings(lvinf, func_ea) -> bool

    Restore user defined local variable settings in the database.

    @param lvinf: ptr to output buffer (C++: lvar_uservec_t *)
    @param func_ea: entry address of the function (C++: ea_t)
    @return: success
    """
    return _ida_hexrays.restore_user_lvar_settings(*args)

def save_user_lvar_settings(*args):
    """
    save_user_lvar_settings(func_ea, lvinf)

    Save user defined local variable settings into the database.

    @param func_ea: entry address of the function (C++: ea_t)
    @param lvinf: user-specified info about local variables (C++: const
                  lvar_uservec_t &)
    """
    return _ida_hexrays.save_user_lvar_settings(*args)
user_lvar_modifier_t_swigregister = _ida_hexrays.user_lvar_modifier_t_swigregister
user_lvar_modifier_t_swigregister(user_lvar_modifier_t)

def modify_user_lvars(*args):
    """
    modify_user_lvars(entry_ea, mlv) -> bool

    Modify saved local variable settings.

    @param entry_ea: function start address (C++: ea_t)
    @param mlv: local variable modifier (C++: user_lvar_modifier_t &)
    @return: true if modified variables
    """
    return _ida_hexrays.modify_user_lvars(*args)
udcall_t_swigregister = _ida_hexrays.udcall_t_swigregister
udcall_t_swigregister(udcall_t)

def restore_user_defined_calls(*args):
    """
    restore_user_defined_calls(udcalls, func_ea) -> bool

    Restore user defined function calls from the database.

    @param udcalls: ptr to output buffer (C++: udcall_map_t *)
    @param func_ea: entry address of the function (C++: ea_t)
    @return: success
    """
    return _ida_hexrays.restore_user_defined_calls(*args)

def save_user_defined_calls(*args):
    """
    save_user_defined_calls(func_ea, udcalls)

    Save user defined local function calls into the database.

    @param func_ea: entry address of the function (C++: ea_t)
    @param udcalls: user-specified info about user defined function calls
                    (C++: const udcall_map_t &)
    """
    return _ida_hexrays.save_user_defined_calls(*args)

def parse_user_call(*args):
    """
    parse_user_call(udc, decl, silent) -> bool

    Convert function type declaration into internal structure

    @param udc: - pointer to output structure (C++: udcall_t *)
    @param decl: - function type declaration (C++: const char *)
    @param silent: - if TRUE: do not show warning in case of incorrect type
                   (C++: bool)
    @return: success
    """
    return _ida_hexrays.parse_user_call(*args)

def convert_to_user_call(*args):
    """
    convert_to_user_call(udc, cdg) -> merror_t

    try to generate user-defined call for an instruction

    @param udc (C++: const udcall_t &)
    @param cdg (C++: codegen_t &)
    @return: Microcode error codes code: MERR_OK - user-defined call
             generated else - error (MERR_INSN == inacceptable udc.tif)
    """
    return _ida_hexrays.convert_to_user_call(*args)
microcode_filter_t_swigregister = _ida_hexrays.microcode_filter_t_swigregister
microcode_filter_t_swigregister(microcode_filter_t)

def install_microcode_filter(*args):
    """
    install_microcode_filter(filter, install=True)

    register/unregister non-standard microcode generator

    @param filter: - microcode generator object (C++: microcode_filter_t *)
    @param install: - TRUE - register the object, FALSE - unregister (C++:
                    bool)
    """
    return _ida_hexrays.install_microcode_filter(*args)
udc_filter_t_swigregister = _ida_hexrays.udc_filter_t_swigregister
udc_filter_t_swigregister(udc_filter_t)
bitset_t_swigregister = _ida_hexrays.bitset_t_swigregister
bitset_t_swigregister(bitset_t)
# Bitset geometry constants exported from the C++ side.
bitset_width = cvar.bitset_width
bitset_align = cvar.bitset_align
bitset_shift = cvar.bitset_shift
ivl_t_swigregister = _ida_hexrays.ivl_t_swigregister
ivl_t_swigregister(ivl_t)
ivl_with_name_t_swigregister = _ida_hexrays.ivl_with_name_t_swigregister
ivl_with_name_t_swigregister(ivl_with_name_t)
ivlset_t_swigregister = _ida_hexrays.ivlset_t_swigregister
ivlset_t_swigregister(ivlset_t)

def get_mreg_name(*args):
    """
    get_mreg_name(bit, width, ud=None) -> int
    """
    return _ida_hexrays.get_mreg_name(*args)
rlist_t_swigregister = _ida_hexrays.rlist_t_swigregister
rlist_t_swigregister(rlist_t)
mlist_t_swigregister = _ida_hexrays.mlist_t_swigregister
mlist_t_swigregister(mlist_t)
simple_graph_t_swigregister = _ida_hexrays.simple_graph_t_swigregister
simple_graph_t_swigregister(simple_graph_t)
op_parent_info_t_swigregister = _ida_hexrays.op_parent_info_t_swigregister
op_parent_info_t_swigregister(op_parent_info_t)
minsn_visitor_t_swigregister = _ida_hexrays.minsn_visitor_t_swigregister
minsn_visitor_t_swigregister(minsn_visitor_t)
mop_visitor_t_swigregister = _ida_hexrays.mop_visitor_t_swigregister
mop_visitor_t_swigregister(mop_visitor_t)
scif_visitor_t_swigregister = _ida_hexrays.scif_visitor_t_swigregister
scif_visitor_t_swigregister(scif_visitor_t)
mlist_mop_visitor_t_swigregister = _ida_hexrays.mlist_mop_visitor_t_swigregister
mlist_mop_visitor_t_swigregister(mlist_mop_visitor_t)
lvar_ref_t_swigregister = _ida_hexrays.lvar_ref_t_swigregister
lvar_ref_t_swigregister(lvar_ref_t)
# mop_*: microcode operand type codes re-exported from the C++ side.
mop_z = cvar.mop_z
mop_r = cvar.mop_r
mop_n = cvar.mop_n
mop_str = cvar.mop_str
mop_d = cvar.mop_d
mop_S = cvar.mop_S
mop_v = cvar.mop_v
mop_b = cvar.mop_b
mop_f = cvar.mop_f
mop_l = cvar.mop_l
mop_a = cvar.mop_a
mop_h = cvar.mop_h
mop_c = cvar.mop_c
mop_fn = cvar.mop_fn
mop_p = cvar.mop_p
mop_sc = cvar.mop_sc
NOSIZE = cvar.NOSIZE
stkvar_ref_t_swigregister = _ida_hexrays.stkvar_ref_t_swigregister
stkvar_ref_t_swigregister(stkvar_ref_t)
scif_t_swigregister = _ida_hexrays.scif_t_swigregister
scif_t_swigregister(scif_t)
mnumber_t_swigregister = _ida_hexrays.mnumber_t_swigregister
mnumber_t_swigregister(mnumber_t)
fnumber_t_swigregister = _ida_hexrays.fnumber_t_swigregister
fnumber_t_swigregister(fnumber_t)
# SHINS_*: microcode display flags.
SHINS_NUMADDR = _ida_hexrays.SHINS_NUMADDR
"""
display definition addresses for numbers
"""
SHINS_VALNUM = _ida_hexrays.SHINS_VALNUM
"""
display value numbers
"""
SHINS_SHORT = _ida_hexrays.SHINS_SHORT
"""
do not display use-def chains and other attrs
"""
SHINS_LDXEA = _ida_hexrays.SHINS_LDXEA
"""
display address of ldx expressions (not used)
"""
NO_SIDEFF = _ida_hexrays.NO_SIDEFF
WITH_SIDEFF = _ida_hexrays.WITH_SIDEFF
ONLY_SIDEFF = _ida_hexrays.ONLY_SIDEFF
ANY_REGSIZE = _ida_hexrays.ANY_REGSIZE
mop_t_swigregister = _ida_hexrays.mop_t_swigregister
mop_t_swigregister(mop_t)
MAX_OPSIZE = cvar.MAX_OPSIZE
DOUBLE_OPSIZE = cvar.DOUBLE_OPSIZE
# OPROP_*: operand properties.
OPROP_IMPDONE = _ida_hexrays.OPROP_IMPDONE
"""
imported operand (a pointer) has been dereferenced
"""
OPROP_UDT = _ida_hexrays.OPROP_UDT
"""
a struct or union
"""
OPROP_FLOAT = _ida_hexrays.OPROP_FLOAT
"""
possibly floating value
"""
OPROP_CCFLAGS = _ida_hexrays.OPROP_CCFLAGS
"""
condition codes register value
"""
OPROP_UDEFVAL = _ida_hexrays.OPROP_UDEFVAL
"""
uses undefined value
"""

def lexcompare(*args):
    """
    lexcompare(a, b) -> int
    """
    return _ida_hexrays.lexcompare(*args)
mop_pair_t_swigregister = _ida_hexrays.mop_pair_t_swigregister
mop_pair_t_swigregister(mop_pair_t)
mop_addr_t_swigregister = _ida_hexrays.mop_addr_t_swigregister
mop_addr_t_swigregister(mop_addr_t)
mcallarg_t_swigregister = _ida_hexrays.mcallarg_t_swigregister
mcallarg_t_swigregister(mcallarg_t)
# ROLE_*: recognized roles of helper functions.
ROLE_UNK = _ida_hexrays.ROLE_UNK
ROLE_EMPTY = _ida_hexrays.ROLE_EMPTY
ROLE_MEMSET = _ida_hexrays.ROLE_MEMSET
ROLE_MEMSET32 = _ida_hexrays.ROLE_MEMSET32
ROLE_MEMSET64 = _ida_hexrays.ROLE_MEMSET64
ROLE_MEMCPY = _ida_hexrays.ROLE_MEMCPY
ROLE_STRCPY = _ida_hexrays.ROLE_STRCPY
ROLE_STRLEN = _ida_hexrays.ROLE_STRLEN
ROLE_STRCAT = _ida_hexrays.ROLE_STRCAT
ROLE_TAIL = _ida_hexrays.ROLE_TAIL
ROLE_BUG = _ida_hexrays.ROLE_BUG
ROLE_ALLOCA = _ida_hexrays.ROLE_ALLOCA
ROLE_BSWAP = _ida_hexrays.ROLE_BSWAP
ROLE_PRESENT = _ida_hexrays.ROLE_PRESENT
ROLE_CONTAINING_RECORD = _ida_hexrays.ROLE_CONTAINING_RECORD
ROLE_FASTFAIL = _ida_hexrays.ROLE_FASTFAIL
ROLE_READFLAGS = _ida_hexrays.ROLE_READFLAGS
# ROLE_* (continued) and FUNC_NAME_* constants used by helper-role detection.
ROLE_IS_MUL_OK = _ida_hexrays.ROLE_IS_MUL_OK
ROLE_SATURATED_MUL = _ida_hexrays.ROLE_SATURATED_MUL
ROLE_BITTEST = _ida_hexrays.ROLE_BITTEST
ROLE_BITTESTANDSET = _ida_hexrays.ROLE_BITTESTANDSET
ROLE_BITTESTANDRESET = _ida_hexrays.ROLE_BITTESTANDRESET
ROLE_BITTESTANDCOMPLEMENT = _ida_hexrays.ROLE_BITTESTANDCOMPLEMENT
ROLE_VA_ARG = _ida_hexrays.ROLE_VA_ARG
ROLE_VA_COPY = _ida_hexrays.ROLE_VA_COPY
ROLE_VA_START = _ida_hexrays.ROLE_VA_START
ROLE_VA_END = _ida_hexrays.ROLE_VA_END
ROLE_ROL = _ida_hexrays.ROLE_ROL
ROLE_ROR = _ida_hexrays.ROLE_ROR
ROLE_CFSUB3 = _ida_hexrays.ROLE_CFSUB3
ROLE_OFSUB3 = _ida_hexrays.ROLE_OFSUB3
ROLE_ABS = _ida_hexrays.ROLE_ABS
FUNC_NAME_MEMCPY = _ida_hexrays.FUNC_NAME_MEMCPY
FUNC_NAME_MEMSET = _ida_hexrays.FUNC_NAME_MEMSET
FUNC_NAME_MEMSET32 = _ida_hexrays.FUNC_NAME_MEMSET32
FUNC_NAME_MEMSET64 = _ida_hexrays.FUNC_NAME_MEMSET64
FUNC_NAME_STRCPY = _ida_hexrays.FUNC_NAME_STRCPY
FUNC_NAME_STRLEN = _ida_hexrays.FUNC_NAME_STRLEN
FUNC_NAME_STRCAT = _ida_hexrays.FUNC_NAME_STRCAT
FUNC_NAME_TAIL = _ida_hexrays.FUNC_NAME_TAIL
FUNC_NAME_VA_ARG = _ida_hexrays.FUNC_NAME_VA_ARG
FUNC_NAME_EMPTY = _ida_hexrays.FUNC_NAME_EMPTY
FUNC_NAME_PRESENT = _ida_hexrays.FUNC_NAME_PRESENT
FUNC_NAME_CONTAINING_RECORD = _ida_hexrays.FUNC_NAME_CONTAINING_RECORD
mcallinfo_t_swigregister = _ida_hexrays.mcallinfo_t_swigregister
mcallinfo_t_swigregister(mcallinfo_t)
# FCI_*: mcallinfo_t (call information) flags.
FCI_PROP = _ida_hexrays.FCI_PROP
"""
call has been propagated
"""
FCI_DEAD = _ida_hexrays.FCI_DEAD
"""
some return registers were determined dead
"""
FCI_FINAL = _ida_hexrays.FCI_FINAL
"""
call type is final, should not be changed
"""
FCI_NORET = _ida_hexrays.FCI_NORET
"""
call does not return
"""
FCI_PURE = _ida_hexrays.FCI_PURE
"""
pure function
"""
FCI_NOSIDE = _ida_hexrays.FCI_NOSIDE
"""
call does not have side effects
"""
FCI_SPLOK = _ida_hexrays.FCI_SPLOK
"""
spoiled/visible_memory lists have been optimized. for some functions we can
reduce them as soon as information about the arguments becomes available. in
order not to try optimize them again we use this bit.
"""
FCI_HASCALL = _ida_hexrays.FCI_HASCALL
"""
A function is an synthetic helper combined from several instructions and at
least one of them was a call to a real functions
"""
FCI_HASFMT = _ida_hexrays.FCI_HASFMT
"""
printf- or scanf-style format string A variadic function with recognized
"""
mcases_t_swigregister = _ida_hexrays.mcases_t_swigregister
mcases_t_swigregister(mcases_t)
voff_t_swigregister = _ida_hexrays.voff_t_swigregister
voff_t_swigregister(voff_t)
vivl_t_swigregister = _ida_hexrays.vivl_t_swigregister
vivl_t_swigregister(vivl_t)
chain_t_swigregister = _ida_hexrays.chain_t_swigregister
chain_t_swigregister(chain_t)
# CHF_*: use-def chain properties.
CHF_INITED = _ida_hexrays.CHF_INITED
"""
is chain initialized? (valid only after lvar allocation)
"""
CHF_REPLACED = _ida_hexrays.CHF_REPLACED
"""
chain operands have been replaced?
"""
CHF_OVER = _ida_hexrays.CHF_OVER
"""
overlapped chain
"""
CHF_FAKE = _ida_hexrays.CHF_FAKE
"""
fake chain created by widen_chains()
"""
CHF_PASSTHRU = _ida_hexrays.CHF_PASSTHRU
"""
pass-thru chain, must use the input variable to the block
"""
CHF_TERM = _ida_hexrays.CHF_TERM
"""
terminating chain; the variable does not survive across the block
"""
SIZEOF_BLOCK_CHAINS = _ida_hexrays.SIZEOF_BLOCK_CHAINS
block_chains_t_swigregister = _ida_hexrays.block_chains_t_swigregister
block_chains_t_swigregister(block_chains_t)
chain_visitor_t_swigregister = _ida_hexrays.chain_visitor_t_swigregister
chain_visitor_t_swigregister(chain_visitor_t)
graph_chains_t_swigregister = _ida_hexrays.graph_chains_t_swigregister
graph_chains_t_swigregister(graph_chains_t)
# GCA_*: chain enumeration filters.
GCA_EMPTY = _ida_hexrays.GCA_EMPTY
"""
include empty chains
"""
GCA_SPEC = _ida_hexrays.GCA_SPEC
"""
include chains for special registers
"""
GCA_ALLOC = _ida_hexrays.GCA_ALLOC
"""
enumerate only allocated chains
"""
GCA_NALLOC = _ida_hexrays.GCA_NALLOC
"""
enumerate only non-allocated chains
"""
GCA_OFIRST = _ida_hexrays.GCA_OFIRST
"""
consider only chains of the first block
"""
GCA_OLAST = _ida_hexrays.GCA_OLAST
"""
consider only chains of the last block
"""
minsn_t_swigregister = _ida_hexrays.minsn_t_swigregister
minsn_t_swigregister(minsn_t)
# IPROP_*: microinstruction properties.
IPROP_OPTIONAL = _ida_hexrays.IPROP_OPTIONAL
"""
optional instruction
"""
IPROP_PERSIST = _ida_hexrays.IPROP_PERSIST
"""
persistent insn; they are not destroyed
"""
IPROP_WILDMATCH = _ida_hexrays.IPROP_WILDMATCH
"""
match multiple insns
"""
IPROP_CLNPOP = _ida_hexrays.IPROP_CLNPOP
"""
(e.g. "pop ecx" is often used for that) the purpose of the instruction is to
clean stack
"""
IPROP_FPINSN = _ida_hexrays.IPROP_FPINSN
"""
floating point insn
"""
IPROP_FARCALL = _ida_hexrays.IPROP_FARCALL
"""
call of a far function using push cs/call sequence
"""
IPROP_TAILCALL = _ida_hexrays.IPROP_TAILCALL
"""
tail call
"""
IPROP_ASSERT = _ida_hexrays.IPROP_ASSERT
"""
assertion: usually mov #val, op. assertions are used to help the optimizer.
assertions are ignored when generating ctree
"""
IPROP_SPLIT = _ida_hexrays.IPROP_SPLIT
"""
the instruction has been split:
"""
IPROP_SPLIT1 = _ida_hexrays.IPROP_SPLIT1
"""
into 1 byte
"""
IPROP_SPLIT2 = _ida_hexrays.IPROP_SPLIT2
"""
into 2 bytes
"""
IPROP_SPLIT4 = _ida_hexrays.IPROP_SPLIT4
"""
into 4 bytes
"""
IPROP_SPLIT8 = _ida_hexrays.IPROP_SPLIT8
"""
into 8 bytes
"""
IPROP_COMBINED = _ida_hexrays.IPROP_COMBINED
"""
insn has been modified because of a partial reference
"""
IPROP_EXTSTX = _ida_hexrays.IPROP_EXTSTX
"""
this is m_ext propagated into m_stx
"""
IPROP_IGNLOWSRC = _ida_hexrays.IPROP_IGNLOWSRC
"""
low part of the instruction source operand has been created artificially
(this bit is used only for 'and x, 80...')
"""
IPROP_INV_JX = _ida_hexrays.IPROP_INV_JX
"""
inverted conditional jump
"""
IPROP_WAS_NORET = _ida_hexrays.IPROP_WAS_NORET
"""
was noret icall
"""
IPROP_MULTI_MOV = _ida_hexrays.IPROP_MULTI_MOV
"""
(example: STM on ARM may transfer multiple registers) the minsn was
generated as part of insn that moves multiple registersbits that can be set
by plugins:
"""
IPROP_DONT_PROP = _ida_hexrays.IPROP_DONT_PROP
"""
may not propagate
"""
IPROP_DONT_COMB = _ida_hexrays.IPROP_DONT_COMB
"""
may not combine this instruction with others
"""
# OPTI_*: optimization request bits.
OPTI_ADDREXPRS = _ida_hexrays.OPTI_ADDREXPRS
"""
optimize all address expressions (&x+N; &x-&y)
"""
OPTI_MINSTKREF = _ida_hexrays.OPTI_MINSTKREF
"""
may update minstkref
"""
OPTI_COMBINSNS = _ida_hexrays.OPTI_COMBINSNS
"""
may combine insns (only for optimize_insn)
"""
OPTI_NO_LDXOPT = _ida_hexrays.OPTI_NO_LDXOPT
"""
do not optimize low/high(ldx)
"""
# EQ_*: instruction comparison flags.
EQ_IGNSIZE = _ida_hexrays.EQ_IGNSIZE
"""
ignore operand sizes
"""
EQ_IGNCODE = _ida_hexrays.EQ_IGNCODE
"""
ignore instruction opcodes
"""
EQ_CMPDEST = _ida_hexrays.EQ_CMPDEST
"""
compare instruction destinations
"""
EQ_OPTINSN = _ida_hexrays.EQ_OPTINSN
"""
optimize mop_d operands
"""

def getf_reginsn(*args):
    """
    getf_reginsn(ins) -> minsn_t

    Skip assertions forward.

    @param ins (C++: const minsn_t *)
    """
    return _ida_hexrays.getf_reginsn(*args)

def getb_reginsn(*args):
    """
    getb_reginsn(ins) -> minsn_t

    Skip assertions backward.

    @param ins (C++: const minsn_t *)
    """
    return _ida_hexrays.getb_reginsn(*args)
# BLT_*: block types.
BLT_NONE = _ida_hexrays.BLT_NONE
BLT_STOP = _ida_hexrays.BLT_STOP
BLT_0WAY = _ida_hexrays.BLT_0WAY
BLT_1WAY = _ida_hexrays.BLT_1WAY
BLT_2WAY = _ida_hexrays.BLT_2WAY
BLT_NWAY = _ida_hexrays.BLT_NWAY
BLT_XTRN = _ida_hexrays.BLT_XTRN
mblock_t_swigregister = _ida_hexrays.mblock_t_swigregister
mblock_t_swigregister(mblock_t)
# MBL_*: microcode block properties.
MBL_PRIV = _ida_hexrays.MBL_PRIV
"""
the specified are accepted (used in patterns) private block - no
instructions except
"""
MBL_NONFAKE = _ida_hexrays.MBL_NONFAKE
"""
regular block
"""
MBL_FAKE = _ida_hexrays.MBL_FAKE
"""
fake block (after a tail call)
"""
MBL_GOTO = _ida_hexrays.MBL_GOTO
"""
this block is a goto target
"""
MBL_TCAL = _ida_hexrays.MBL_TCAL
"""
aritifical call block for tail calls
"""
MBL_PUSH = _ida_hexrays.MBL_PUSH
"""
needs "convert push/pop instructions"
"""
MBL_DMT64 = _ida_hexrays.MBL_DMT64
"""
needs "demote 64bits"
"""
MBL_COMB = _ida_hexrays.MBL_COMB
"""
needs "combine" pass
"""
MBL_PROP = _ida_hexrays.MBL_PROP
"""
needs 'propagation' pass
"""
MBL_DEAD = _ida_hexrays.MBL_DEAD
"""
needs "eliminate deads" pass
"""
MBL_LIST = _ida_hexrays.MBL_LIST
"""
use/def lists are ready (not dirty)
"""
MBL_INCONST = _ida_hexrays.MBL_INCONST
"""
inconsistent lists: we are building them
"""
MBL_CALL = _ida_hexrays.MBL_CALL
"""
call information has been built
"""
MBL_BACKPROP = _ida_hexrays.MBL_BACKPROP
"""
performed backprop_cc
"""
MBL_NORET = _ida_hexrays.MBL_NORET
"""
dead end block: doesn't return execution control
"""
MBL_DSLOT = _ida_hexrays.MBL_DSLOT
"""
block for delay slot
"""
MBL_VALRANGES = _ida_hexrays.MBL_VALRANGES
"""
should optimize using value ranges
"""
# FD_*: find-definition/use search flags.
FD_BACKWARD = _ida_hexrays.FD_BACKWARD
"""
search direction
"""
FD_FORWARD = _ida_hexrays.FD_FORWARD
"""
search direction
"""
FD_USE = _ida_hexrays.FD_USE
"""
look for use
"""
FD_DEF = _ida_hexrays.FD_DEF
"""
look for definition
"""
FD_DIRTY = _ida_hexrays.FD_DIRTY
"""
by function calls and indirect memory access ignore possible implicit
definitions
"""
# VR_*: value-range query modes.
VR_AT_START = _ida_hexrays.VR_AT_START
"""
at the block start (if M is NULL) get value ranges before the instruction or
"""
VR_AT_END = _ida_hexrays.VR_AT_END
"""
get value ranges after the instruction or at the block end, just after the
last instruction (if M is NULL)
"""
VR_EXACT = _ida_hexrays.VR_EXACT
"""
valrng size will be >= vivl.size find exact match. if not set, the returned
"""
# WARN_*: decompiler warning codes.
WARN_VARARG_REGS = _ida_hexrays.WARN_VARARG_REGS
WARN_ILL_PURGED = _ida_hexrays.WARN_ILL_PURGED
WARN_ILL_FUNCTYPE = _ida_hexrays.WARN_ILL_FUNCTYPE
WARN_VARARG_TCAL = _ida_hexrays.WARN_VARARG_TCAL
WARN_VARARG_NOSTK = _ida_hexrays.WARN_VARARG_NOSTK
WARN_VARARG_MANY = _ida_hexrays.WARN_VARARG_MANY
WARN_ADDR_OUTARGS = _ida_hexrays.WARN_ADDR_OUTARGS
WARN_DEP_UNK_CALLS = _ida_hexrays.WARN_DEP_UNK_CALLS
WARN_ILL_ELLIPSIS = _ida_hexrays.WARN_ILL_ELLIPSIS
WARN_GUESSED_TYPE = _ida_hexrays.WARN_GUESSED_TYPE
WARN_EXP_LINVAR = _ida_hexrays.WARN_EXP_LINVAR
WARN_WIDEN_CHAINS = _ida_hexrays.WARN_WIDEN_CHAINS
WARN_BAD_PURGED = _ida_hexrays.WARN_BAD_PURGED
WARN_CBUILD_LOOPS = _ida_hexrays.WARN_CBUILD_LOOPS
WARN_NO_SAVE_REST = _ida_hexrays.WARN_NO_SAVE_REST
WARN_ODD_INPUT_REG = _ida_hexrays.WARN_ODD_INPUT_REG
WARN_ODD_ADDR_USE = _ida_hexrays.WARN_ODD_ADDR_USE
WARN_MUST_RET_FP = _ida_hexrays.WARN_MUST_RET_FP
WARN_ILL_FPU_STACK = _ida_hexrays.WARN_ILL_FPU_STACK
WARN_SELFREF_PROP = _ida_hexrays.WARN_SELFREF_PROP
WARN_WOULD_OVERLAP = _ida_hexrays.WARN_WOULD_OVERLAP
WARN_ARRAY_INARG = _ida_hexrays.WARN_ARRAY_INARG
WARN_MAX_ARGS = _ida_hexrays.WARN_MAX_ARGS
WARN_BAD_FIELD_TYPE = _ida_hexrays.WARN_BAD_FIELD_TYPE
WARN_WRITE_CONST = _ida_hexrays.WARN_WRITE_CONST
WARN_BAD_RETVAR = _ida_hexrays.WARN_BAD_RETVAR
WARN_FRAG_LVAR = _ida_hexrays.WARN_FRAG_LVAR
WARN_HUGE_STKOFF = _ida_hexrays.WARN_HUGE_STKOFF
WARN_UNINITED_REG = _ida_hexrays.WARN_UNINITED_REG
WARN_FIXED_MACRO = _ida_hexrays.WARN_FIXED_MACRO
WARN_WRONG_VA_OFF = _ida_hexrays.WARN_WRONG_VA_OFF
# WARN_* (continued): decompiler warning codes.
WARN_CR_NOFIELD = _ida_hexrays.WARN_CR_NOFIELD
WARN_CR_BADOFF = _ida_hexrays.WARN_CR_BADOFF
WARN_BAD_STROFF = _ida_hexrays.WARN_BAD_STROFF
WARN_BAD_VARSIZE = _ida_hexrays.WARN_BAD_VARSIZE
WARN_UNSUPP_REG = _ida_hexrays.WARN_UNSUPP_REG
WARN_UNALIGNED_ARG = _ida_hexrays.WARN_UNALIGNED_ARG
WARN_BAD_STD_TYPE = _ida_hexrays.WARN_BAD_STD_TYPE
WARN_BAD_CALL_SP = _ida_hexrays.WARN_BAD_CALL_SP
WARN_MISSED_SWITCH = _ida_hexrays.WARN_MISSED_SWITCH
WARN_BAD_SP = _ida_hexrays.WARN_BAD_SP
WARN_BAD_STKPNT = _ida_hexrays.WARN_BAD_STKPNT
WARN_UNDEF_LVAR = _ida_hexrays.WARN_UNDEF_LVAR
WARN_JUMPOUT = _ida_hexrays.WARN_JUMPOUT
WARN_BAD_VALRNG = _ida_hexrays.WARN_BAD_VALRNG
WARN_BAD_SHADOW = _ida_hexrays.WARN_BAD_SHADOW
WARN_MAX = _ida_hexrays.WARN_MAX
hexwarn_t_swigregister = _ida_hexrays.hexwarn_t_swigregister
hexwarn_t_swigregister(hexwarn_t)
# MMAT_*: microcode maturity levels.
MMAT_ZERO = _ida_hexrays.MMAT_ZERO
MMAT_GENERATED = _ida_hexrays.MMAT_GENERATED
MMAT_PREOPTIMIZED = _ida_hexrays.MMAT_PREOPTIMIZED
MMAT_LOCOPT = _ida_hexrays.MMAT_LOCOPT
MMAT_CALLS = _ida_hexrays.MMAT_CALLS
MMAT_GLBOPT1 = _ida_hexrays.MMAT_GLBOPT1
MMAT_GLBOPT2 = _ida_hexrays.MMAT_GLBOPT2
MMAT_GLBOPT3 = _ida_hexrays.MMAT_GLBOPT3
MMAT_LVARS = _ida_hexrays.MMAT_LVARS
# MMIDX_*: memory region indexes.
MMIDX_GLBLOW = _ida_hexrays.MMIDX_GLBLOW
MMIDX_LVARS = _ida_hexrays.MMIDX_LVARS
MMIDX_RETADDR = _ida_hexrays.MMIDX_RETADDR
MMIDX_SHADOW = _ida_hexrays.MMIDX_SHADOW
MMIDX_ARGS = _ida_hexrays.MMIDX_ARGS
MMIDX_GLBHIGH = _ida_hexrays.MMIDX_GLBHIGH
mba_ranges_t_swigregister = _ida_hexrays.mba_ranges_t_swigregister
mba_ranges_t_swigregister(mba_ranges_t)
mba_range_iterator_t_swigregister = _ida_hexrays.mba_range_iterator_t_swigregister
mba_range_iterator_t_swigregister(mba_range_iterator_t)
mbl_array_t_swigregister = _ida_hexrays.mbl_array_t_swigregister
mbl_array_t_swigregister(mbl_array_t)
# MBA_*: mbl_array_t (microcode array) properties.
MBA_PRCDEFS = _ida_hexrays.MBA_PRCDEFS
"""
use precise defeas for chain-allocated lvars
"""
MBA_NOFUNC = _ida_hexrays.MBA_NOFUNC
"""
function is not present, addresses might be wrong
"""
MBA_PATTERN = _ida_hexrays.MBA_PATTERN
"""
microcode pattern, callinfo is present
"""
MBA_LOADED = _ida_hexrays.MBA_LOADED
"""
loaded gdl, no instructions (debugging)
"""
MBA_RETFP = _ida_hexrays.MBA_RETFP
"""
function returns floating point value
"""
MBA_SPLINFO = _ida_hexrays.MBA_SPLINFO
"""
(final_type ? idb_spoiled : spoiled_regs) is valid
"""
MBA_PASSREGS = _ida_hexrays.MBA_PASSREGS
"""
has 'mcallinfo_t::pass_regs'
"""
MBA_THUNK = _ida_hexrays.MBA_THUNK
"""
thunk function
"""
MBA_CMNSTK = _ida_hexrays.MBA_CMNSTK
"""
stkvars+stkargs should be considered as one area
"""
MBA_PREOPT = _ida_hexrays.MBA_PREOPT
"""
preoptimization stage complete
"""
MBA_CMBBLK = _ida_hexrays.MBA_CMBBLK
"""
request to combine blocks
"""
MBA_ASRTOK = _ida_hexrays.MBA_ASRTOK
"""
assertions have been generated
"""
MBA_CALLS = _ida_hexrays.MBA_CALLS
"""
callinfo has been built
"""
MBA_ASRPROP = _ida_hexrays.MBA_ASRPROP
"""
assertion have been propagated
"""
MBA_SAVRST = _ida_hexrays.MBA_SAVRST
"""
save-restore analysis has been performed
"""
MBA_RETREF = _ida_hexrays.MBA_RETREF
"""
return type has been refined
"""
MBA_GLBOPT = _ida_hexrays.MBA_GLBOPT
"""
microcode has been optimized globally
"""
MBA_OVERVAR = _ida_hexrays.MBA_OVERVAR
"""
an overlapped variable has been detected
"""
MBA_LVARS0 = _ida_hexrays.MBA_LVARS0
"""
lvar pre-allocation has been performed
"""
MBA_LVARS1 = _ida_hexrays.MBA_LVARS1
"""
lvar real allocation has been performed
"""
MBA_DELPAIRS = _ida_hexrays.MBA_DELPAIRS
"""
pairs have been deleted once
"""
MBA_CHVARS = _ida_hexrays.MBA_CHVARS
"""
can verify chain varnums
"""
MBA_SHORT = _ida_hexrays.MBA_SHORT
"""
use short display
"""
MBA_COLGDL = _ida_hexrays.MBA_COLGDL
"""
display graph after each reduction
"""
MBA_INSGDL = _ida_hexrays.MBA_INSGDL
"""
display instruction in graphs
"""
MBA_NICE = _ida_hexrays.MBA_NICE
"""
apply transformations to c code
"""
MBA_REFINE = _ida_hexrays.MBA_REFINE
"""
may refine return value size
"""
MBA_RESERVED = _ida_hexrays.MBA_RESERVED
MBA_WINGR32 = _ida_hexrays.MBA_WINGR32
"""
use wingraph32
"""
MBA_NUMADDR = _ida_hexrays.MBA_NUMADDR
"""
display definition addresses for numbers
"""
MBA_VALNUM = _ida_hexrays.MBA_VALNUM
"""
display value numbers
"""
MBA_INITIAL_FLAGS = _ida_hexrays.MBA_INITIAL_FLAGS
# MBA2_*: second flags word of mbl_array_t.
MBA2_LVARNAMES_OK = _ida_hexrays.MBA2_LVARNAMES_OK
MBA2_LVARS_RENAMED = _ida_hexrays.MBA2_LVARS_RENAMED
MBA2_OVER_CHAINS = _ida_hexrays.MBA2_OVER_CHAINS
MBA2_VALRNG_DONE = _ida_hexrays.MBA2_VALRNG_DONE
MBA2_IS_CTR = _ida_hexrays.MBA2_IS_CTR
MBA2_IS_DTR = _ida_hexrays.MBA2_IS_DTR
MBA2_ARGIDX_OK = _ida_hexrays.MBA2_ARGIDX_OK
MBA2_NO_DUP_CALLS = _ida_hexrays.MBA2_NO_DUP_CALLS
MBA2_NO_DUP_LVARS = _ida_hexrays.MBA2_NO_DUP_LVARS
MBA2_INITIAL_FLAGS = _ida_hexrays.MBA2_INITIAL_FLAGS
MBA2_ALL_FLAGS = _ida_hexrays.MBA2_ALL_FLAGS
NALT_VD = _ida_hexrays.NALT_VD
"""
this index is not used by ida
"""
# LOCOPT_*: local optimization flags.
LOCOPT_ALL = _ida_hexrays.LOCOPT_ALL
"""
is not set, only dirty blocks will be optimized redo optimization for all
blocks. if this bit
"""
LOCOPT_REFINE = _ida_hexrays.LOCOPT_REFINE
"""
refine return type, ok to fail
"""
LOCOPT_REFINE2 = _ida_hexrays.LOCOPT_REFINE2
"""
refine return type, try harder
"""
# ACFL_*: analysis control flags.
ACFL_LOCOPT = _ida_hexrays.ACFL_LOCOPT
"""
perform local propagation (requires ACFL_BLKOPT)
"""
ACFL_BLKOPT = _ida_hexrays.ACFL_BLKOPT
"""
perform interblock transformations
"""
ACFL_GLBPROP = _ida_hexrays.ACFL_GLBPROP
"""
perform global propagation
"""
ACFL_GLBDEL = _ida_hexrays.ACFL_GLBDEL
"""
perform dead code eliminition
"""
ACFL_GUESS = _ida_hexrays.ACFL_GUESS
"""
may guess calling conventions
"""
# CPBLK_*: copy-block flags.
CPBLK_FAST = _ida_hexrays.CPBLK_FAST
"""
do not update minbstkref and minbargref
"""
CPBLK_MINREF = _ida_hexrays.CPBLK_MINREF
"""
update minbstkref and minbargref
"""
CPBLK_OPTJMP = _ida_hexrays.CPBLK_OPTJMP
"""
if it becomes useless del the jump insn at the end of the block
"""

def mbl_array_t_deserialize(*args):
    """
    mbl_array_t_deserialize(bytes, nbytes) -> mbl_array_t
    """
    return _ida_hexrays.mbl_array_t_deserialize(*args)
chain_keeper_t_swigregister = _ida_hexrays.chain_keeper_t_swigregister
chain_keeper_t_swigregister(chain_keeper_t)
# GC_*: graph chain kinds.
GC_REGS_AND_STKVARS = _ida_hexrays.GC_REGS_AND_STKVARS
GC_ASR = _ida_hexrays.GC_ASR
GC_XDSU = _ida_hexrays.GC_XDSU
GC_END = _ida_hexrays.GC_END
GC_DIRTY_ALL = _ida_hexrays.GC_DIRTY_ALL
mbl_graph_t_swigregister = _ida_hexrays.mbl_graph_t_swigregister
mbl_graph_t_swigregister(mbl_graph_t)
codegen_t_swigregister = _ida_hexrays.codegen_t_swigregister
codegen_t_swigregister(codegen_t)

def is_kreg(*args):
    """
    is_kreg(r) -> bool

    Is a kernel register?

    @param r (C++: mreg_t)
    """
    return _ida_hexrays.is_kreg(*args)

def get_temp_regs(*args):
    """
    get_temp_regs() -> mlist_t

    Get list of temporary registers. Tempregs are temporary registers that
    are used during code generation. They do not map to regular processor
    registers. They are used only to store temporary values during execution
    of one instruction. Tempregs may not be used to pass a value from one
    block to another. In other words, at the end of a block all tempregs
    must be dead.
    """
    return _ida_hexrays.get_temp_regs(*args)

def get_hexrays_version(*args):
    """
    get_hexrays_version() -> char const *

    Get decompiler version. The returned string is of the form
    <major>.<minor>.<revision>.<build-date>

    @return: pointer to version string. For example: "2.0.0.140605"
    """
    return _ida_hexrays.get_hexrays_version(*args)

def checkout_hexrays_license(*args):
    """
    checkout_hexrays_license(silent) -> bool

    Check out a floating decompiler license. This function will display a
    dialog box if the license is not available. For non-floating licenses
    this function is effectively no-op. It is not necessary to call this
    function before decompiling. If the license was not checked out, the
    decompiler will automatically do it. This function can be used to check
    out a license in advance and ensure that a license is available.

    @param silent: silently fail if the license can not be checked out.
                   (C++: bool)
    @return: false if failed
    """
    return _ida_hexrays.checkout_hexrays_license(*args)

def open_pseudocode(*args):
    """
    open_pseudocode(ea, new_window) -> vdui_t

    Open pseudocode window. The specified function is decompiled and the
    pseudocode window is opened.

    @param ea: function to decompile (C++: ea_t)
    @param new_window: 0:reuse existing window; 1:open new window; -1: reuse
                       existing window if the current view is pseudocode
                       (C++: int)
    @return: false if failed
    """
    return _ida_hexrays.open_pseudocode(*args)

def close_pseudocode(*args):
    """
    close_pseudocode(f) -> bool

    Close pseudocode window.

    @param f: pointer to window (C++: TWidget *)
    @return: false if failed
    """
    return _ida_hexrays.close_pseudocode(*args)
# VDRUN_*: batch decompilation flags.
VDRUN_NEWFILE = _ida_hexrays.VDRUN_NEWFILE
"""
Create a new file or overwrite existing file.
"""
VDRUN_APPEND = _ida_hexrays.VDRUN_APPEND
"""
Create a new file or append to existing file.
"""
VDRUN_ONLYNEW = _ida_hexrays.VDRUN_ONLYNEW
"""
Fail if output file already exists.
"""
VDRUN_SILENT = _ida_hexrays.VDRUN_SILENT
"""
Silent decompilation.
"""
VDRUN_SENDIDB = _ida_hexrays.VDRUN_SENDIDB
"""
Send problematic databases to hex-rays.com.
"""
VDRUN_MAYSTOP = _ida_hexrays.VDRUN_MAYSTOP
"""
the user can cancel decompilation
"""
VDRUN_CMDLINE = _ida_hexrays.VDRUN_CMDLINE
"""
called from ida's command line
"""
VDRUN_STATS = _ida_hexrays.VDRUN_STATS
"""
print statistics into vd_stats.txt
"""
VDRUN_LUMINA = _ida_hexrays.VDRUN_LUMINA
"""
use lumina server
"""

def decompile_many(*args):
    """
    decompile_many(outfile, funcaddrs, flags) -> bool

    Batch decompilation. Decompile all or the specified functions

    @param outfile: name of the output file (C++: const char *)
    @param funcaddrs: list of functions to decompile. If NULL or empty, then
                      decompile all nonlib functions (C++: eavec_t *)
    @param flags: Batch decompilation bits (C++: int)
    @return: true if no internal error occurred and the user has not
             cancelled decompilation
    """
    return _ida_hexrays.decompile_many(*args)
hexrays_failure_t_swigregister = _ida_hexrays.hexrays_failure_t_swigregister
hexrays_failure_t_swigregister(hexrays_failure_t)
vd_failure_t_swigregister = _ida_hexrays.vd_failure_t_swigregister
vd_failure_t_swigregister(vd_failure_t)
vd_interr_t_swigregister = _ida_hexrays.vd_interr_t_swigregister
vd_interr_t_swigregister(vd_interr_t)

def send_database(*args):
    """
    send_database(err, silent)

    Send the database to Hex-Rays. This function sends the current database
    to the Hex-Rays server. The database is sent in the compressed form over
    an encrypted (SSL) connection.

    @param err: failure description object. Empty hexrays_failure_t object
                can be used if error information is not available. (C++:
                const hexrays_failure_t &)
    @param silent: if false, a dialog box will be displayed before sending
                   the database. (C++: bool)
    """
    return _ida_hexrays.send_database(*args)
gco_info_t_swigregister = _ida_hexrays.gco_info_t_swigregister
gco_info_t_swigregister(gco_info_t)
# GCO_*: get_current_operand() result flags.
GCO_STK = _ida_hexrays.GCO_STK
"""
a stack variable
"""
GCO_REG = _ida_hexrays.GCO_REG
"""
is register? otherwise a stack variable
"""
GCO_USE = _ida_hexrays.GCO_USE
"""
is source operand?
"""
GCO_DEF = _ida_hexrays.GCO_DEF
"""
is destination operand?
"""

def get_current_operand(*args):
    """
    get_current_operand(out) -> bool

    Get the instruction operand under the cursor. This function determines
    the operand that is under the cursor in the active disassembly listing.
    If the operand refers to a register or stack variable, it return true.

    @param out (C++: gco_info_t *)
    """
    return _ida_hexrays.get_current_operand(*args)

def remitem(*args):
    """
    remitem(e)
    """
    return _ida_hexrays.remitem(*args)
# cot_*: ctree expression opcodes; cit_*: ctree statement opcodes.
cot_empty = _ida_hexrays.cot_empty
cot_comma = _ida_hexrays.cot_comma
cot_asg = _ida_hexrays.cot_asg
cot_asgbor = _ida_hexrays.cot_asgbor
cot_asgxor = _ida_hexrays.cot_asgxor
cot_asgband = _ida_hexrays.cot_asgband
cot_asgadd = _ida_hexrays.cot_asgadd
cot_asgsub = _ida_hexrays.cot_asgsub
cot_asgmul = _ida_hexrays.cot_asgmul
cot_asgsshr = _ida_hexrays.cot_asgsshr
cot_asgushr = _ida_hexrays.cot_asgushr
cot_asgshl = _ida_hexrays.cot_asgshl
cot_asgsdiv = _ida_hexrays.cot_asgsdiv
cot_asgudiv = _ida_hexrays.cot_asgudiv
cot_asgsmod = _ida_hexrays.cot_asgsmod
cot_asgumod = _ida_hexrays.cot_asgumod
cot_tern = _ida_hexrays.cot_tern
cot_lor = _ida_hexrays.cot_lor
cot_land = _ida_hexrays.cot_land
cot_bor = _ida_hexrays.cot_bor
cot_xor = _ida_hexrays.cot_xor
cot_band = _ida_hexrays.cot_band
cot_eq = _ida_hexrays.cot_eq
cot_ne = _ida_hexrays.cot_ne
cot_sge = _ida_hexrays.cot_sge
cot_uge = _ida_hexrays.cot_uge
cot_sle = _ida_hexrays.cot_sle
cot_ule = _ida_hexrays.cot_ule
cot_sgt = _ida_hexrays.cot_sgt
cot_ugt = _ida_hexrays.cot_ugt
cot_slt = _ida_hexrays.cot_slt
cot_ult = _ida_hexrays.cot_ult
cot_sshr = _ida_hexrays.cot_sshr
cot_ushr = _ida_hexrays.cot_ushr
cot_shl = _ida_hexrays.cot_shl
cot_add = _ida_hexrays.cot_add
cot_sub = _ida_hexrays.cot_sub
cot_mul = _ida_hexrays.cot_mul
cot_sdiv = _ida_hexrays.cot_sdiv
cot_udiv = _ida_hexrays.cot_udiv
cot_smod = _ida_hexrays.cot_smod
cot_umod = _ida_hexrays.cot_umod
cot_fadd = _ida_hexrays.cot_fadd
cot_fsub = _ida_hexrays.cot_fsub
cot_fmul = _ida_hexrays.cot_fmul
cot_fdiv = _ida_hexrays.cot_fdiv
cot_fneg = _ida_hexrays.cot_fneg
cot_neg = _ida_hexrays.cot_neg
cot_cast = _ida_hexrays.cot_cast
cot_lnot = _ida_hexrays.cot_lnot
cot_bnot = _ida_hexrays.cot_bnot
cot_ptr = _ida_hexrays.cot_ptr
cot_ref = _ida_hexrays.cot_ref
cot_postinc = _ida_hexrays.cot_postinc
cot_postdec = _ida_hexrays.cot_postdec
cot_preinc = _ida_hexrays.cot_preinc
cot_predec = _ida_hexrays.cot_predec
cot_call = _ida_hexrays.cot_call
cot_idx = _ida_hexrays.cot_idx
cot_memref = _ida_hexrays.cot_memref
cot_memptr = _ida_hexrays.cot_memptr
cot_num = _ida_hexrays.cot_num
cot_fnum = _ida_hexrays.cot_fnum
cot_str = _ida_hexrays.cot_str
cot_obj = _ida_hexrays.cot_obj
cot_var = _ida_hexrays.cot_var
cot_insn = _ida_hexrays.cot_insn
cot_sizeof = _ida_hexrays.cot_sizeof
cot_helper = _ida_hexrays.cot_helper
cot_type = _ida_hexrays.cot_type
cot_last = _ida_hexrays.cot_last
cit_empty = _ida_hexrays.cit_empty
cit_block = _ida_hexrays.cit_block
cit_expr = _ida_hexrays.cit_expr
cit_if = _ida_hexrays.cit_if
cit_for = _ida_hexrays.cit_for
cit_while = _ida_hexrays.cit_while
cit_do = _ida_hexrays.cit_do
cit_switch = _ida_hexrays.cit_switch
cit_break = _ida_hexrays.cit_break
cit_continue = _ida_hexrays.cit_continue
cit_return = _ida_hexrays.cit_return
cit_goto = _ida_hexrays.cit_goto
cit_asm = _ida_hexrays.cit_asm
cit_end = _ida_hexrays.cit_end
operator_info_t_swigregister = _ida_hexrays.operator_info_t_swigregister
operator_info_t_swigregister(operator_info_t)
# FX_*: operator fixity; COI_*: operator_info_t flags.
FX_NONE = cvar.FX_NONE
FX_INFIX = cvar.FX_INFIX
FX_PREFIX = cvar.FX_PREFIX
FX_POSTFIX = cvar.FX_POSTFIX
FX_TERNARY = cvar.FX_TERNARY
COI_RL = cvar.COI_RL
COI_LR = cvar.COI_LR
COI_INT = cvar.COI_INT
COI_FP = cvar.COI_FP
COI_SH = cvar.COI_SH
COI_SGN = cvar.COI_SGN
COI_SBN = cvar.COI_SBN

def negated_relation(*args):
    """
    negated_relation(op) -> ctype_t

    Negate a comparison operator. For example, cot_sge becomes cot_slt.

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.negated_relation(*args)

def swapped_relation(*args):
    """
    swapped_relation(op) -> ctype_t

    Swap a comparison operator. For example, cot_sge becomes cot_sle.

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.swapped_relation(*args)

def get_op_signness(*args):
    """
    get_op_signness(op) -> type_sign_t

    Get operator sign. Meaningful for sign-dependent operators, like
    cot_sdiv.

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.get_op_signness(*args)

def asgop(*args):
    """
    asgop(cop) -> ctype_t

    Convert plain operator into assignment operator. For example, cot_add
    returns cot_asgadd.

    @param cop (C++: ctype_t)
    """
    return _ida_hexrays.asgop(*args)

def asgop_revert(*args):
    """
    asgop_revert(cop) -> ctype_t

    Convert assignment operator into plain operator. For example, cot_asgadd
    returns cot_add

    @param cop (C++: ctype_t)
    @return: cot_empty is the input operator is not an assignment operator.
    """
    return _ida_hexrays.asgop_revert(*args)

def op_uses_x(*args):
    """
    op_uses_x(op) -> bool

    Does operator use the 'x' field of 'cexpr_t' ?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.op_uses_x(*args)

def op_uses_y(*args):
    """
    op_uses_y(op) -> bool

    Does operator use the 'y' field of 'cexpr_t' ?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.op_uses_y(*args)

def op_uses_z(*args):
    """
    op_uses_z(op) -> bool

    Does operator use the 'z' field of 'cexpr_t' ?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.op_uses_z(*args)

def is_binary(*args):
    """
    is_binary(op) -> bool

    Is binary operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_binary(*args)

def is_unary(*args):
    """
    is_unary(op) -> bool

    Is unary operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_unary(*args)

def is_relational(*args):
    """
    is_relational(op) -> bool

    Is comparison operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_relational(*args)

def is_assignment(*args):
    """
    is_assignment(op) -> bool

    Is assignment operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_assignment(*args)

def accepts_udts(*args):
    """
    accepts_udts(op) -> bool
    """
    return _ida_hexrays.accepts_udts(*args)

def is_prepost(*args):
    """
    is_prepost(op) -> bool

    Is pre/post increment/decrement operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_prepost(*args)

def is_commutative(*args):
    """
    is_commutative(op) -> bool

    Is commutative operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_commutative(*args)

def is_additive(*args):
    """
    is_additive(op) -> bool

    Is additive operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_additive(*args)

def is_multiplicative(*args):
    """
    is_multiplicative(op) -> bool

    Is multiplicative operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_multiplicative(*args)

def is_bitop(*args):
    """
    is_bitop(op) -> bool

    Is bit related operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_bitop(*args)

def is_logical(*args):
    """
    is_logical(op) -> bool

    Is logical operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_logical(*args)

def is_loop(*args):
    """
    is_loop(op) -> bool

    Is loop statement code?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_loop(*args)

def is_break_consumer(*args):
    """
    is_break_consumer(op) -> bool

    Does a break statement influence the specified statement code?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_break_consumer(*args)

def is_lvalue(*args):
    """
    is_lvalue(op) -> bool

    Is Lvalue operator?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_lvalue(*args)

def accepts_small_udts(*args):
    """
    accepts_small_udts(op) -> bool

    Is the operator allowed on small structure or union?
@param op (C++: ctype_t) """ return _ida_hexrays.accepts_small_udts(*args) cnumber_t_swigregister = _ida_hexrays.cnumber_t_swigregister cnumber_t_swigregister(cnumber_t) var_ref_t_swigregister = _ida_hexrays.var_ref_t_swigregister var_ref_t_swigregister(var_ref_t) ctree_visitor_t_swigregister = _ida_hexrays.ctree_visitor_t_swigregister ctree_visitor_t_swigregister(ctree_visitor_t) CV_FAST = _ida_hexrays.CV_FAST """ do not maintain parent information """ CV_PRUNE = _ida_hexrays.CV_PRUNE """ this bit is set by visit...() to prune the walk """ CV_PARENTS = _ida_hexrays.CV_PARENTS """ maintain parent information """ CV_POST = _ida_hexrays.CV_POST """ call the leave...() functions """ CV_RESTART = _ida_hexrays.CV_RESTART """ restart enumeration at the top expr (apply_to_exprs) """ CV_INSNS = _ida_hexrays.CV_INSNS """ visit only statements, prune all expressions do not use before the final ctree maturity because expressions may contain statements at intermediate stages (see cot_insn). Otherwise you risk missing statements embedded into expressions. 
""" ctree_parentee_t_swigregister = _ida_hexrays.ctree_parentee_t_swigregister ctree_parentee_t_swigregister(ctree_parentee_t) cfunc_parentee_t_swigregister = _ida_hexrays.cfunc_parentee_t_swigregister cfunc_parentee_t_swigregister(cfunc_parentee_t) CMAT_ZERO = _ida_hexrays.CMAT_ZERO CMAT_BUILT = _ida_hexrays.CMAT_BUILT CMAT_TRANS1 = _ida_hexrays.CMAT_TRANS1 CMAT_NICE = _ida_hexrays.CMAT_NICE CMAT_TRANS2 = _ida_hexrays.CMAT_TRANS2 CMAT_CPA = _ida_hexrays.CMAT_CPA CMAT_TRANS3 = _ida_hexrays.CMAT_TRANS3 CMAT_CASTED = _ida_hexrays.CMAT_CASTED CMAT_FINAL = _ida_hexrays.CMAT_FINAL ITP_EMPTY = _ida_hexrays.ITP_EMPTY ITP_ARG1 = _ida_hexrays.ITP_ARG1 ITP_ARG64 = _ida_hexrays.ITP_ARG64 ITP_BRACE1 = _ida_hexrays.ITP_BRACE1 ITP_INNER_LAST = _ida_hexrays.ITP_INNER_LAST ITP_ASM = _ida_hexrays.ITP_ASM ITP_ELSE = _ida_hexrays.ITP_ELSE ITP_DO = _ida_hexrays.ITP_DO ITP_SEMI = _ida_hexrays.ITP_SEMI ITP_CURLY1 = _ida_hexrays.ITP_CURLY1 ITP_CURLY2 = _ida_hexrays.ITP_CURLY2 ITP_BRACE2 = _ida_hexrays.ITP_BRACE2 ITP_COLON = _ida_hexrays.ITP_COLON ITP_BLOCK1 = _ida_hexrays.ITP_BLOCK1 ITP_BLOCK2 = _ida_hexrays.ITP_BLOCK2 ITP_CASE = _ida_hexrays.ITP_CASE ITP_SIGN = _ida_hexrays.ITP_SIGN treeloc_t_swigregister = _ida_hexrays.treeloc_t_swigregister treeloc_t_swigregister(treeloc_t) RETRIEVE_ONCE = _ida_hexrays.RETRIEVE_ONCE RETRIEVE_ALWAYS = _ida_hexrays.RETRIEVE_ALWAYS citem_cmt_t_swigregister = _ida_hexrays.citem_cmt_t_swigregister citem_cmt_t_swigregister(citem_cmt_t) citem_locator_t_swigregister = _ida_hexrays.citem_locator_t_swigregister citem_locator_t_swigregister(citem_locator_t) bit_bound_t_swigregister = _ida_hexrays.bit_bound_t_swigregister bit_bound_t_swigregister(bit_bound_t) citem_t_swigregister = _ida_hexrays.citem_t_swigregister citem_t_swigregister(citem_t) cexpr_t_swigregister = _ida_hexrays.cexpr_t_swigregister cexpr_t_swigregister(cexpr_t) EXFL_CPADONE = _ida_hexrays.EXFL_CPADONE """ pointer arithmetic correction done """ EXFL_LVALUE = _ida_hexrays.EXFL_LVALUE """ 
expression is lvalue even if it doesn't look like it """ EXFL_FPOP = _ida_hexrays.EXFL_FPOP """ floating point operation """ EXFL_ALONE = _ida_hexrays.EXFL_ALONE """ standalone helper """ EXFL_CSTR = _ida_hexrays.EXFL_CSTR """ string literal """ EXFL_PARTIAL = _ida_hexrays.EXFL_PARTIAL """ type of the expression is considered partial """ EXFL_UNDEF = _ida_hexrays.EXFL_UNDEF """ expression uses undefined value """ EXFL_JUMPOUT = _ida_hexrays.EXFL_JUMPOUT """ jump out-of-function """ EXFL_VFTABLE = _ida_hexrays.EXFL_VFTABLE """ is ptr to vftable (used for cot_memptr, cot_memref) """ EXFL_ALL = _ida_hexrays.EXFL_ALL """ all currently defined bits """ ceinsn_t_swigregister = _ida_hexrays.ceinsn_t_swigregister ceinsn_t_swigregister(ceinsn_t) CALC_CURLY_BRACES = _ida_hexrays.CALC_CURLY_BRACES NO_CURLY_BRACES = _ida_hexrays.NO_CURLY_BRACES USE_CURLY_BRACES = _ida_hexrays.USE_CURLY_BRACES cif_t_swigregister = _ida_hexrays.cif_t_swigregister cif_t_swigregister(cif_t) cloop_t_swigregister = _ida_hexrays.cloop_t_swigregister cloop_t_swigregister(cloop_t) cfor_t_swigregister = _ida_hexrays.cfor_t_swigregister cfor_t_swigregister(cfor_t) cwhile_t_swigregister = _ida_hexrays.cwhile_t_swigregister cwhile_t_swigregister(cwhile_t) cdo_t_swigregister = _ida_hexrays.cdo_t_swigregister cdo_t_swigregister(cdo_t) creturn_t_swigregister = _ida_hexrays.creturn_t_swigregister creturn_t_swigregister(creturn_t) cgoto_t_swigregister = _ida_hexrays.cgoto_t_swigregister cgoto_t_swigregister(cgoto_t) casm_t_swigregister = _ida_hexrays.casm_t_swigregister casm_t_swigregister(casm_t) cinsn_t_swigregister = _ida_hexrays.cinsn_t_swigregister cinsn_t_swigregister(cinsn_t) def cinsn_t_insn_is_epilog(*args): """ cinsn_t_insn_is_epilog(insn) -> bool """ return _ida_hexrays.cinsn_t_insn_is_epilog(*args) cblock_t_swigregister = _ida_hexrays.cblock_t_swigregister cblock_t_swigregister(cblock_t) carg_t_swigregister = _ida_hexrays.carg_t_swigregister carg_t_swigregister(carg_t) carglist_t_swigregister = 
_ida_hexrays.carglist_t_swigregister carglist_t_swigregister(carglist_t) CFL_FINAL = _ida_hexrays.CFL_FINAL """ call type is final, should not be changed """ CFL_HELPER = _ida_hexrays.CFL_HELPER """ created from a decompiler helper function """ ccase_t_swigregister = _ida_hexrays.ccase_t_swigregister ccase_t_swigregister(ccase_t) ccases_t_swigregister = _ida_hexrays.ccases_t_swigregister ccases_t_swigregister(ccases_t) cswitch_t_swigregister = _ida_hexrays.cswitch_t_swigregister cswitch_t_swigregister(cswitch_t) ctree_anchor_t_swigregister = _ida_hexrays.ctree_anchor_t_swigregister ctree_anchor_t_swigregister(ctree_anchor_t) ANCHOR_INDEX = _ida_hexrays.ANCHOR_INDEX ANCHOR_MASK = _ida_hexrays.ANCHOR_MASK ANCHOR_CITEM = _ida_hexrays.ANCHOR_CITEM """ c-tree item """ ANCHOR_LVAR = _ida_hexrays.ANCHOR_LVAR """ declaration of local variable """ ANCHOR_ITP = _ida_hexrays.ANCHOR_ITP """ item type preciser """ ANCHOR_BLKCMT = _ida_hexrays.ANCHOR_BLKCMT """ block comment (for ctree items) """ VDI_NONE = _ida_hexrays.VDI_NONE VDI_EXPR = _ida_hexrays.VDI_EXPR VDI_LVAR = _ida_hexrays.VDI_LVAR VDI_FUNC = _ida_hexrays.VDI_FUNC VDI_TAIL = _ida_hexrays.VDI_TAIL ctree_item_t_swigregister = _ida_hexrays.ctree_item_t_swigregister ctree_item_t_swigregister(ctree_item_t) GLN_CURRENT = _ida_hexrays.GLN_CURRENT """ get label of the current item """ GLN_GOTO_TARGET = _ida_hexrays.GLN_GOTO_TARGET """ get goto target """ GLN_ALL = _ida_hexrays.GLN_ALL """ get both """ FORBID_UNUSED_LABELS = _ida_hexrays.FORBID_UNUSED_LABELS ALLOW_UNUSED_LABELS = _ida_hexrays.ALLOW_UNUSED_LABELS def _ll_lnot(*args): """ _ll_lnot(e) -> cexpr_t """ return _ida_hexrays._ll_lnot(*args) def _ll_new_block(*args): """ _ll_new_block() -> cinsn_t """ return _ida_hexrays._ll_new_block(*args) def _ll_create_helper(*args): """ _ll_create_helper(standalone, type, format) -> cexpr_t """ return _ida_hexrays._ll_create_helper(*args) def _ll_call_helper(*args): """ _ll_call_helper(rettype, args, format) -> cexpr_t """ return 
_ida_hexrays._ll_call_helper(*args) def _ll_make_num(*args): """ _ll_make_num(n, func=None, ea=BADADDR, opnum=0, sign=no_sign, size=0) -> cexpr_t """ return _ida_hexrays._ll_make_num(*args) def _ll_make_ref(*args): """ _ll_make_ref(e) -> cexpr_t """ return _ida_hexrays._ll_make_ref(*args) def _ll_dereference(*args): """ _ll_dereference(e, ptrsize, is_flt=False) -> cexpr_t """ return _ida_hexrays._ll_dereference(*args) def save_user_labels(*args): """ save_user_labels(func_ea, user_labels) Save user defined labels into the database. @param func_ea: the entry address of the function (C++: ea_t) @param user_labels: collection of user defined labels (C++: const user_labels_t *) """ return _ida_hexrays.save_user_labels(*args) def save_user_cmts(*args): """ save_user_cmts(func_ea, user_cmts) Save user defined comments into the database. @param func_ea: the entry address of the function (C++: ea_t) @param user_cmts: collection of user defined comments (C++: const user_cmts_t *) """ return _ida_hexrays.save_user_cmts(*args) def save_user_numforms(*args): """ save_user_numforms(func_ea, numforms) Save user defined number formats into the database. @param func_ea: the entry address of the function (C++: ea_t) @param numforms: collection of user defined comments (C++: const user_numforms_t *) """ return _ida_hexrays.save_user_numforms(*args) def save_user_iflags(*args): """ save_user_iflags(func_ea, iflags) Save user defined citem iflags into the database. @param func_ea: the entry address of the function (C++: ea_t) @param iflags: collection of user defined citem iflags (C++: const user_iflags_t *) """ return _ida_hexrays.save_user_iflags(*args) def save_user_unions(*args): """ save_user_unions(func_ea, unions) Save user defined union field selections into the database. 
@param func_ea: the entry address of the function (C++: ea_t) @param unions: collection of union field selections (C++: const user_unions_t *) """ return _ida_hexrays.save_user_unions(*args) def restore_user_labels(*args): """ restore_user_labels(func_ea) -> user_labels_t Restore user defined labels from the database. @param func_ea: the entry address of the function (C++: ea_t) @return: collection of user defined labels. The returned object must be deleted by the caller using delete_user_labels() """ return _ida_hexrays.restore_user_labels(*args) def restore_user_cmts(*args): """ restore_user_cmts(func_ea) -> user_cmts_t Restore user defined comments from the database. @param func_ea: the entry address of the function (C++: ea_t) @return: collection of user defined comments. The returned object must be deleted by the caller using delete_user_cmts() """ return _ida_hexrays.restore_user_cmts(*args) def restore_user_numforms(*args): """ restore_user_numforms(func_ea) -> user_numforms_t Restore user defined number formats from the database. @param func_ea: the entry address of the function (C++: ea_t) @return: collection of user defined number formats. The returned object must be deleted by the caller using delete_user_numforms() """ return _ida_hexrays.restore_user_numforms(*args) def restore_user_iflags(*args): """ restore_user_iflags(func_ea) -> user_iflags_t Restore user defined citem iflags from the database. @param func_ea: the entry address of the function (C++: ea_t) @return: collection of user defined iflags. The returned object must be deleted by the caller using delete_user_iflags() """ return _ida_hexrays.restore_user_iflags(*args) def restore_user_unions(*args): """ restore_user_unions(func_ea) -> user_unions_t Restore user defined union field selections from the database. 
@param func_ea: the entry address of the function (C++: ea_t) @return: collection of union field selections The returned object must be deleted by the caller using delete_user_unions() """ return _ida_hexrays.restore_user_unions(*args) cfunc_t_swigregister = _ida_hexrays.cfunc_t_swigregister cfunc_t_swigregister(cfunc_t) CIT_COLLAPSED = _ida_hexrays.CIT_COLLAPSED """ display element in collapsed form """ CFS_BOUNDS = _ida_hexrays.CFS_BOUNDS """ 'eamap' and 'boundaries' are ready """ CFS_TEXT = _ida_hexrays.CFS_TEXT """ 'sv' is ready (and hdrlines) """ CFS_LVARS_HIDDEN = _ida_hexrays.CFS_LVARS_HIDDEN """ local variable definitions are collapsed """ DECOMP_NO_WAIT = _ida_hexrays.DECOMP_NO_WAIT """ do not display waitbox """ DECOMP_NO_CACHE = _ida_hexrays.DECOMP_NO_CACHE """ do not use decompilation cache """ DECOMP_NO_FRAME = _ida_hexrays.DECOMP_NO_FRAME """ do not use function frame info (only snippet mode) """ DECOMP_WARNINGS = _ida_hexrays.DECOMP_WARNINGS """ display warnings in the output window """ def decompile(*args): """ decompile(mbr, hf, flags=0) -> cfuncptr_t Decompile a snippet or a function. @param mbr: what to decompile (C++: const mba_ranges_t &) @param hf: extended error information (if failed) (C++: hexrays_failure_t *) @param flags: bitwise combination of decompile() flags ... bits (C++: int) @return: pointer to the decompilation result (a reference counted pointer). NULL if failed. """ return _ida_hexrays.decompile(*args) def decompile_func(*args): """ decompile_func(pfn, hf, flags=0) -> cfuncptr_t Decompile a function. Multiple decompilations of the same function return the same object. @param pfn: pointer to function to decompile (C++: func_t *) @param hf: extended error information (if failed) (C++: hexrays_failure_t *) @param flags: bitwise combination of decompile() flags ... bits (C++: int) @return: pointer to the decompilation result (a reference counted pointer). NULL if failed. 
""" return _ida_hexrays.decompile_func(*args) def gen_microcode(*args): """ gen_microcode(mbr, hf, retlist=None, flags=0, reqmat=MMAT_GLBOPT3) -> mbl_array_t Generate microcode of an arbitrary code snippet @param mbr: snippet ranges (C++: const mba_ranges_t &) @param hf: extended error information (if failed) (C++: hexrays_failure_t *) @param retlist: list of registers the snippet returns (C++: const mlist_t *) @param flags: bitwise combination of decompile() flags ... bits (C++: int) @param reqmat: required microcode maturity (C++: mba_maturity_t) @return: pointer to the microcode, NULL if failed. """ return _ida_hexrays.gen_microcode(*args) def mark_cfunc_dirty(*args): """ mark_cfunc_dirty(ea, close_views=False) -> bool Flush the cached decompilation results. Erases a cache entry for the specified function. @param ea: function to erase from the cache (C++: ea_t) @param close_views: close pseudocode windows that show the function (C++: bool) @return: if a cache entry existed. """ return _ida_hexrays.mark_cfunc_dirty(*args) def clear_cached_cfuncs(*args): """ clear_cached_cfuncs() Flush all cached decompilation results. """ return _ida_hexrays.clear_cached_cfuncs(*args) def has_cached_cfunc(*args): """ has_cached_cfunc(ea) -> bool Do we have a cached decompilation result for 'ea'? 
@param ea (C++: ea_t) """ return _ida_hexrays.has_cached_cfunc(*args) def get_ctype_name(*args): """ get_ctype_name(op) -> char const * """ return _ida_hexrays.get_ctype_name(*args) def create_field_name(*args): """ create_field_name(type, offset=BADADDR) -> qstring """ return _ida_hexrays.create_field_name(*args) hxe_flowchart = _ida_hexrays.hxe_flowchart hxe_stkpnts = _ida_hexrays.hxe_stkpnts hxe_prolog = _ida_hexrays.hxe_prolog hxe_microcode = _ida_hexrays.hxe_microcode hxe_preoptimized = _ida_hexrays.hxe_preoptimized hxe_locopt = _ida_hexrays.hxe_locopt hxe_prealloc = _ida_hexrays.hxe_prealloc hxe_glbopt = _ida_hexrays.hxe_glbopt hxe_structural = _ida_hexrays.hxe_structural hxe_maturity = _ida_hexrays.hxe_maturity hxe_interr = _ida_hexrays.hxe_interr hxe_combine = _ida_hexrays.hxe_combine hxe_print_func = _ida_hexrays.hxe_print_func hxe_func_printed = _ida_hexrays.hxe_func_printed hxe_resolve_stkaddrs = _ida_hexrays.hxe_resolve_stkaddrs hxe_open_pseudocode = _ida_hexrays.hxe_open_pseudocode hxe_switch_pseudocode = _ida_hexrays.hxe_switch_pseudocode hxe_refresh_pseudocode = _ida_hexrays.hxe_refresh_pseudocode hxe_close_pseudocode = _ida_hexrays.hxe_close_pseudocode hxe_keyboard = _ida_hexrays.hxe_keyboard hxe_right_click = _ida_hexrays.hxe_right_click hxe_double_click = _ida_hexrays.hxe_double_click hxe_curpos = _ida_hexrays.hxe_curpos hxe_create_hint = _ida_hexrays.hxe_create_hint hxe_text_ready = _ida_hexrays.hxe_text_ready hxe_populating_popup = _ida_hexrays.hxe_populating_popup lxe_lvar_name_changed = _ida_hexrays.lxe_lvar_name_changed lxe_lvar_type_changed = _ida_hexrays.lxe_lvar_type_changed lxe_lvar_cmt_changed = _ida_hexrays.lxe_lvar_cmt_changed lxe_lvar_mapping_changed = _ida_hexrays.lxe_lvar_mapping_changed hxe_cmt_changed = _ida_hexrays.hxe_cmt_changed USE_KEYBOARD = _ida_hexrays.USE_KEYBOARD USE_MOUSE = _ida_hexrays.USE_MOUSE ctext_position_t_swigregister = _ida_hexrays.ctext_position_t_swigregister ctext_position_t_swigregister(ctext_position_t) 
# ---------------------------------------------------------------------------
# SWIG-generated glue: class registrations, re-exported C++ constants, and
# the select_udt_by_offset() wrapper.  All values delegate to the native
# _ida_hexrays extension module; nothing here adds behavior of its own.
# ---------------------------------------------------------------------------

# Magic value re-exported from the native module (used by the plugin API
# handshake -- see HEXRAYS_API_MAGIC in the C++ SDK; semantics not visible here).
HEXRAYS_API_MAGIC = cvar.HEXRAYS_API_MAGIC

# Register the SWIG proxy classes with the runtime type system.
history_item_t_swigregister = _ida_hexrays.history_item_t_swigregister
history_item_t_swigregister(history_item_t)
vdui_t_swigregister = _ida_hexrays.vdui_t_swigregister
vdui_t_swigregister(vdui_t)

# CMT_* constants re-exported from the C++ API (comment-kind selectors;
# exact semantics defined by the native module).
CMT_NONE = cvar.CMT_NONE
CMT_TAIL = cvar.CMT_TAIL
CMT_BLOCK1 = cvar.CMT_BLOCK1
CMT_BLOCK2 = cvar.CMT_BLOCK2
CMT_LVAR = cvar.CMT_LVAR
CMT_FUNC = cvar.CMT_FUNC
CMT_ALL = cvar.CMT_ALL

# vdui_t state flags.
VDUI_VISIBLE = _ida_hexrays.VDUI_VISIBLE
""" is visible? """
VDUI_VALID = _ida_hexrays.VDUI_VALID
""" is valid? """
VDUI_LOCKED = _ida_hexrays.VDUI_LOCKED
""" is locked? """

ui_stroff_op_t_swigregister = _ida_hexrays.ui_stroff_op_t_swigregister
ui_stroff_op_t_swigregister(ui_stroff_op_t)
ui_stroff_applicator_t_swigregister = _ida_hexrays.ui_stroff_applicator_t_swigregister
ui_stroff_applicator_t_swigregister(ui_stroff_applicator_t)


def select_udt_by_offset(*args):
    """
    select_udt_by_offset(udts, ops, applicator) -> int

    Select UDT

    @param udts: list of UDT tinfo_t for the selection, if NULL or empty
                 then UDTs from the "Local types" will be used
                 (C++: const qvector < tinfo_t > *)
    @param ops: operands (C++: const ui_stroff_ops_t &)
    @param applicator (C++: ui_stroff_applicator_t &)
    """
    # Pure delegation to the native implementation.
    return _ida_hexrays.select_udt_by_offset(*args)

# Thunk-table re-exports: function pointers published by the native module
# for the Hex-Rays plugin dispatcher.  user_numforms_t iterator helpers:
hx_user_numforms_begin = _ida_hexrays.hx_user_numforms_begin
hx_user_numforms_end = _ida_hexrays.hx_user_numforms_end
hx_user_numforms_next = _ida_hexrays.hx_user_numforms_next
hx_user_numforms_prev = _ida_hexrays.hx_user_numforms_prev
hx_user_numforms_first = _ida_hexrays.hx_user_numforms_first
hx_user_numforms_second = _ida_hexrays.hx_user_numforms_second
hx_user_numforms_find = _ida_hexrays.hx_user_numforms_find
hx_user_numforms_insert = _ida_hexrays.hx_user_numforms_insert
hx_user_numforms_erase = _ida_hexrays.hx_user_numforms_erase
hx_user_numforms_clear = _ida_hexrays.hx_user_numforms_clear
hx_user_numforms_size = _ida_hexrays.hx_user_numforms_size
hx_user_numforms_free = _ida_hexrays.hx_user_numforms_free
# Thunk-table re-exports (continued): generated, one-to-one aliases of the
# native module's dispatcher entries.  Do not rename -- these names are the
# public binding surface.
hx_user_numforms_new = _ida_hexrays.hx_user_numforms_new
# lvar_mapping_t iterator helpers.
hx_lvar_mapping_begin = _ida_hexrays.hx_lvar_mapping_begin
hx_lvar_mapping_end = _ida_hexrays.hx_lvar_mapping_end
hx_lvar_mapping_next = _ida_hexrays.hx_lvar_mapping_next
hx_lvar_mapping_prev = _ida_hexrays.hx_lvar_mapping_prev
hx_lvar_mapping_first = _ida_hexrays.hx_lvar_mapping_first
hx_lvar_mapping_second = _ida_hexrays.hx_lvar_mapping_second
hx_lvar_mapping_find = _ida_hexrays.hx_lvar_mapping_find
hx_lvar_mapping_insert = _ida_hexrays.hx_lvar_mapping_insert
hx_lvar_mapping_erase = _ida_hexrays.hx_lvar_mapping_erase
hx_lvar_mapping_clear = _ida_hexrays.hx_lvar_mapping_clear
hx_lvar_mapping_size = _ida_hexrays.hx_lvar_mapping_size
hx_lvar_mapping_free = _ida_hexrays.hx_lvar_mapping_free
hx_lvar_mapping_new = _ida_hexrays.hx_lvar_mapping_new
# udcall_map_t iterator helpers.
hx_udcall_map_begin = _ida_hexrays.hx_udcall_map_begin
hx_udcall_map_end = _ida_hexrays.hx_udcall_map_end
hx_udcall_map_next = _ida_hexrays.hx_udcall_map_next
hx_udcall_map_prev = _ida_hexrays.hx_udcall_map_prev
hx_udcall_map_first = _ida_hexrays.hx_udcall_map_first
hx_udcall_map_second = _ida_hexrays.hx_udcall_map_second
hx_udcall_map_find = _ida_hexrays.hx_udcall_map_find
hx_udcall_map_insert = _ida_hexrays.hx_udcall_map_insert
hx_udcall_map_erase = _ida_hexrays.hx_udcall_map_erase
hx_udcall_map_clear = _ida_hexrays.hx_udcall_map_clear
hx_udcall_map_size = _ida_hexrays.hx_udcall_map_size
hx_udcall_map_free = _ida_hexrays.hx_udcall_map_free
hx_udcall_map_new = _ida_hexrays.hx_udcall_map_new
# user_cmts_t iterator helpers.
hx_user_cmts_begin = _ida_hexrays.hx_user_cmts_begin
hx_user_cmts_end = _ida_hexrays.hx_user_cmts_end
hx_user_cmts_next = _ida_hexrays.hx_user_cmts_next
hx_user_cmts_prev = _ida_hexrays.hx_user_cmts_prev
hx_user_cmts_first = _ida_hexrays.hx_user_cmts_first
hx_user_cmts_second = _ida_hexrays.hx_user_cmts_second
hx_user_cmts_find = _ida_hexrays.hx_user_cmts_find
hx_user_cmts_insert = _ida_hexrays.hx_user_cmts_insert
hx_user_cmts_erase = _ida_hexrays.hx_user_cmts_erase
# Thunk-table re-exports (continued): generated aliases of native dispatcher
# entries for the user_cmts_t / user_iflags_t / user_unions_t /
# user_labels_t map-iterator APIs.
hx_user_cmts_clear = _ida_hexrays.hx_user_cmts_clear
hx_user_cmts_size = _ida_hexrays.hx_user_cmts_size
hx_user_cmts_free = _ida_hexrays.hx_user_cmts_free
hx_user_cmts_new = _ida_hexrays.hx_user_cmts_new
# user_iflags_t iterator helpers.
hx_user_iflags_begin = _ida_hexrays.hx_user_iflags_begin
hx_user_iflags_end = _ida_hexrays.hx_user_iflags_end
hx_user_iflags_next = _ida_hexrays.hx_user_iflags_next
hx_user_iflags_prev = _ida_hexrays.hx_user_iflags_prev
hx_user_iflags_first = _ida_hexrays.hx_user_iflags_first
hx_user_iflags_second = _ida_hexrays.hx_user_iflags_second
hx_user_iflags_find = _ida_hexrays.hx_user_iflags_find
hx_user_iflags_insert = _ida_hexrays.hx_user_iflags_insert
hx_user_iflags_erase = _ida_hexrays.hx_user_iflags_erase
hx_user_iflags_clear = _ida_hexrays.hx_user_iflags_clear
hx_user_iflags_size = _ida_hexrays.hx_user_iflags_size
hx_user_iflags_free = _ida_hexrays.hx_user_iflags_free
hx_user_iflags_new = _ida_hexrays.hx_user_iflags_new
# user_unions_t iterator helpers.
hx_user_unions_begin = _ida_hexrays.hx_user_unions_begin
hx_user_unions_end = _ida_hexrays.hx_user_unions_end
hx_user_unions_next = _ida_hexrays.hx_user_unions_next
hx_user_unions_prev = _ida_hexrays.hx_user_unions_prev
hx_user_unions_first = _ida_hexrays.hx_user_unions_first
hx_user_unions_second = _ida_hexrays.hx_user_unions_second
hx_user_unions_find = _ida_hexrays.hx_user_unions_find
hx_user_unions_insert = _ida_hexrays.hx_user_unions_insert
hx_user_unions_erase = _ida_hexrays.hx_user_unions_erase
hx_user_unions_clear = _ida_hexrays.hx_user_unions_clear
hx_user_unions_size = _ida_hexrays.hx_user_unions_size
hx_user_unions_free = _ida_hexrays.hx_user_unions_free
hx_user_unions_new = _ida_hexrays.hx_user_unions_new
# user_labels_t iterator helpers.
hx_user_labels_begin = _ida_hexrays.hx_user_labels_begin
hx_user_labels_end = _ida_hexrays.hx_user_labels_end
hx_user_labels_next = _ida_hexrays.hx_user_labels_next
hx_user_labels_prev = _ida_hexrays.hx_user_labels_prev
hx_user_labels_first = _ida_hexrays.hx_user_labels_first
hx_user_labels_second = _ida_hexrays.hx_user_labels_second
# Thunk-table re-exports (continued): user_labels_t / eamap_t / boundaries_t
# / block_chains_t iterators, followed by value-range (valrng_t), type,
# lvar, bitset/interval-set and microcode-operand (mop_t) entries.
hx_user_labels_find = _ida_hexrays.hx_user_labels_find
hx_user_labels_insert = _ida_hexrays.hx_user_labels_insert
hx_user_labels_erase = _ida_hexrays.hx_user_labels_erase
hx_user_labels_clear = _ida_hexrays.hx_user_labels_clear
hx_user_labels_size = _ida_hexrays.hx_user_labels_size
hx_user_labels_free = _ida_hexrays.hx_user_labels_free
hx_user_labels_new = _ida_hexrays.hx_user_labels_new
# eamap_t iterator helpers.
hx_eamap_begin = _ida_hexrays.hx_eamap_begin
hx_eamap_end = _ida_hexrays.hx_eamap_end
hx_eamap_next = _ida_hexrays.hx_eamap_next
hx_eamap_prev = _ida_hexrays.hx_eamap_prev
hx_eamap_first = _ida_hexrays.hx_eamap_first
hx_eamap_second = _ida_hexrays.hx_eamap_second
hx_eamap_find = _ida_hexrays.hx_eamap_find
hx_eamap_insert = _ida_hexrays.hx_eamap_insert
hx_eamap_erase = _ida_hexrays.hx_eamap_erase
hx_eamap_clear = _ida_hexrays.hx_eamap_clear
hx_eamap_size = _ida_hexrays.hx_eamap_size
hx_eamap_free = _ida_hexrays.hx_eamap_free
hx_eamap_new = _ida_hexrays.hx_eamap_new
# boundaries_t iterator helpers.
hx_boundaries_begin = _ida_hexrays.hx_boundaries_begin
hx_boundaries_end = _ida_hexrays.hx_boundaries_end
hx_boundaries_next = _ida_hexrays.hx_boundaries_next
hx_boundaries_prev = _ida_hexrays.hx_boundaries_prev
hx_boundaries_first = _ida_hexrays.hx_boundaries_first
hx_boundaries_second = _ida_hexrays.hx_boundaries_second
hx_boundaries_find = _ida_hexrays.hx_boundaries_find
hx_boundaries_insert = _ida_hexrays.hx_boundaries_insert
hx_boundaries_erase = _ida_hexrays.hx_boundaries_erase
hx_boundaries_clear = _ida_hexrays.hx_boundaries_clear
hx_boundaries_size = _ida_hexrays.hx_boundaries_size
hx_boundaries_free = _ida_hexrays.hx_boundaries_free
hx_boundaries_new = _ida_hexrays.hx_boundaries_new
# block_chains_t iterator helpers (note: exposes _get rather than
# _first/_second like the map-style iterators above).
hx_block_chains_begin = _ida_hexrays.hx_block_chains_begin
hx_block_chains_end = _ida_hexrays.hx_block_chains_end
hx_block_chains_next = _ida_hexrays.hx_block_chains_next
hx_block_chains_prev = _ida_hexrays.hx_block_chains_prev
hx_block_chains_get = _ida_hexrays.hx_block_chains_get
hx_block_chains_find = _ida_hexrays.hx_block_chains_find
hx_block_chains_insert = _ida_hexrays.hx_block_chains_insert
hx_block_chains_erase = _ida_hexrays.hx_block_chains_erase
hx_block_chains_clear = _ida_hexrays.hx_block_chains_clear
hx_block_chains_size = _ida_hexrays.hx_block_chains_size
hx_block_chains_free = _ida_hexrays.hx_block_chains_free
hx_block_chains_new = _ida_hexrays.hx_block_chains_new
# valrng_t (value range) entries.
hx_valrng_t_clear = _ida_hexrays.hx_valrng_t_clear
hx_valrng_t_copy = _ida_hexrays.hx_valrng_t_copy
hx_valrng_t_assign = _ida_hexrays.hx_valrng_t_assign
hx_valrng_t_compare = _ida_hexrays.hx_valrng_t_compare
hx_valrng_t_set_eq = _ida_hexrays.hx_valrng_t_set_eq
hx_valrng_t_set_cmp = _ida_hexrays.hx_valrng_t_set_cmp
hx_valrng_t_reduce_size = _ida_hexrays.hx_valrng_t_reduce_size
hx_valrng_t_intersect_with = _ida_hexrays.hx_valrng_t_intersect_with
hx_valrng_t_unite_with = _ida_hexrays.hx_valrng_t_unite_with
hx_valrng_t_inverse = _ida_hexrays.hx_valrng_t_inverse
hx_valrng_t_has = _ida_hexrays.hx_valrng_t_has
hx_valrng_t_print = _ida_hexrays.hx_valrng_t_print
hx_valrng_t_dstr = _ida_hexrays.hx_valrng_t_dstr
hx_valrng_t_cvt_to_single_value = _ida_hexrays.hx_valrng_t_cvt_to_single_value
hx_valrng_t_cvt_to_cmp = _ida_hexrays.hx_valrng_t_cvt_to_cmp
# Microcode helpers and optimizer-handler installation entries.
hx_get_merror_desc = _ida_hexrays.hx_get_merror_desc
hx_reg2mreg = _ida_hexrays.hx_reg2mreg
hx_mreg2reg = _ida_hexrays.hx_mreg2reg
hx_install_optinsn_handler = _ida_hexrays.hx_install_optinsn_handler
hx_remove_optinsn_handler = _ida_hexrays.hx_remove_optinsn_handler
hx_install_optblock_handler = _ida_hexrays.hx_install_optblock_handler
hx_remove_optblock_handler = _ida_hexrays.hx_remove_optblock_handler
hx_must_mcode_close_block = _ida_hexrays.hx_must_mcode_close_block
hx_is_mcode_propagatable = _ida_hexrays.hx_is_mcode_propagatable
hx_negate_mcode_relation = _ida_hexrays.hx_negate_mcode_relation
hx_swap_mcode_relation = _ida_hexrays.hx_swap_mcode_relation
hx_get_signed_mcode = _ida_hexrays.hx_get_signed_mcode
hx_get_unsigned_mcode = _ida_hexrays.hx_get_unsigned_mcode
hx_mcode_modifies_d = _ida_hexrays.hx_mcode_modifies_d
hx_operand_locator_t_compare = _ida_hexrays.hx_operand_locator_t_compare
# Printer entries.
hx_vd_printer_t_print = _ida_hexrays.hx_vd_printer_t_print
hx_file_printer_t_print = _ida_hexrays.hx_file_printer_t_print
hx_qstring_printer_t_print = _ida_hexrays.hx_qstring_printer_t_print
hx_dstr = _ida_hexrays.hx_dstr
# Type-system helper entries.
hx_is_type_correct = _ida_hexrays.hx_is_type_correct
hx_is_small_udt = _ida_hexrays.hx_is_small_udt
hx_is_nonbool_type = _ida_hexrays.hx_is_nonbool_type
hx_is_bool_type = _ida_hexrays.hx_is_bool_type
hx_partial_type_num = _ida_hexrays.hx_partial_type_num
hx_get_float_type = _ida_hexrays.hx_get_float_type
hx_get_int_type_by_width_and_sign = _ida_hexrays.hx_get_int_type_by_width_and_sign
hx_get_unk_type = _ida_hexrays.hx_get_unk_type
hx_dummy_ptrtype = _ida_hexrays.hx_dummy_ptrtype
hx_get_member_type = _ida_hexrays.hx_get_member_type
hx_make_pointer = _ida_hexrays.hx_make_pointer
hx_create_typedef = _ida_hexrays.hx_create_typedef
hx_get_type = _ida_hexrays.hx_get_type
hx_set_type = _ida_hexrays.hx_set_type
# vdloc_t (variable location) entries.
hx_vdloc_t_dstr = _ida_hexrays.hx_vdloc_t_dstr
hx_vdloc_t_compare = _ida_hexrays.hx_vdloc_t_compare
hx_vdloc_t_is_aliasable = _ida_hexrays.hx_vdloc_t_is_aliasable
hx_print_vdloc = _ida_hexrays.hx_print_vdloc
hx_arglocs_overlap = _ida_hexrays.hx_arglocs_overlap
# lvar (local variable) entries.
hx_lvar_locator_t_compare = _ida_hexrays.hx_lvar_locator_t_compare
hx_lvar_locator_t_dstr = _ida_hexrays.hx_lvar_locator_t_dstr
hx_lvar_t_dstr = _ida_hexrays.hx_lvar_t_dstr
hx_lvar_t_is_promoted_arg = _ida_hexrays.hx_lvar_t_is_promoted_arg
hx_lvar_t_accepts_type = _ida_hexrays.hx_lvar_t_accepts_type
hx_lvar_t_set_lvar_type = _ida_hexrays.hx_lvar_t_set_lvar_type
hx_lvar_t_set_width = _ida_hexrays.hx_lvar_t_set_width
hx_lvar_t_append_list = _ida_hexrays.hx_lvar_t_append_list
hx_lvars_t_find_stkvar = _ida_hexrays.hx_lvars_t_find_stkvar
hx_lvars_t_find = _ida_hexrays.hx_lvars_t_find
hx_lvars_t_find_lvar = _ida_hexrays.hx_lvars_t_find_lvar
hx_restore_user_lvar_settings = _ida_hexrays.hx_restore_user_lvar_settings
hx_save_user_lvar_settings = _ida_hexrays.hx_save_user_lvar_settings
hx_modify_user_lvars = _ida_hexrays.hx_modify_user_lvars
# User-defined call entries.
hx_restore_user_defined_calls = _ida_hexrays.hx_restore_user_defined_calls
hx_save_user_defined_calls = _ida_hexrays.hx_save_user_defined_calls
hx_parse_user_call = _ida_hexrays.hx_parse_user_call
hx_convert_to_user_call = _ida_hexrays.hx_convert_to_user_call
hx_install_microcode_filter = _ida_hexrays.hx_install_microcode_filter
hx_udc_filter_t_init = _ida_hexrays.hx_udc_filter_t_init
hx_udc_filter_t_apply = _ida_hexrays.hx_udc_filter_t_apply
# bitset_t entries (trailing underscores distinguish C++ overloads).
hx_bitset_t_bitset_t = _ida_hexrays.hx_bitset_t_bitset_t
hx_bitset_t_copy = _ida_hexrays.hx_bitset_t_copy
hx_bitset_t_add = _ida_hexrays.hx_bitset_t_add
hx_bitset_t_add_ = _ida_hexrays.hx_bitset_t_add_
hx_bitset_t_add__ = _ida_hexrays.hx_bitset_t_add__
hx_bitset_t_sub = _ida_hexrays.hx_bitset_t_sub
hx_bitset_t_sub_ = _ida_hexrays.hx_bitset_t_sub_
hx_bitset_t_sub__ = _ida_hexrays.hx_bitset_t_sub__
hx_bitset_t_cut_at = _ida_hexrays.hx_bitset_t_cut_at
hx_bitset_t_shift_down = _ida_hexrays.hx_bitset_t_shift_down
hx_bitset_t_has = _ida_hexrays.hx_bitset_t_has
hx_bitset_t_has_all = _ida_hexrays.hx_bitset_t_has_all
hx_bitset_t_has_any = _ida_hexrays.hx_bitset_t_has_any
hx_bitset_t_dstr = _ida_hexrays.hx_bitset_t_dstr
hx_bitset_t_empty = _ida_hexrays.hx_bitset_t_empty
hx_bitset_t_count = _ida_hexrays.hx_bitset_t_count
hx_bitset_t_count_ = _ida_hexrays.hx_bitset_t_count_
hx_bitset_t_last = _ida_hexrays.hx_bitset_t_last
hx_bitset_t_fill_with_ones = _ida_hexrays.hx_bitset_t_fill_with_ones
hx_bitset_t_has_common = _ida_hexrays.hx_bitset_t_has_common
hx_bitset_t_intersect = _ida_hexrays.hx_bitset_t_intersect
hx_bitset_t_is_subset_of = _ida_hexrays.hx_bitset_t_is_subset_of
hx_bitset_t_compare = _ida_hexrays.hx_bitset_t_compare
hx_bitset_t_goup = _ida_hexrays.hx_bitset_t_goup
# ivl_t / ivlset_t (interval set) entries.
hx_ivl_t_dstr = _ida_hexrays.hx_ivl_t_dstr
hx_ivl_t_compare = _ida_hexrays.hx_ivl_t_compare
hx_ivlset_t_add = _ida_hexrays.hx_ivlset_t_add
hx_ivlset_t_add_ = _ida_hexrays.hx_ivlset_t_add_
hx_ivlset_t_addmasked = _ida_hexrays.hx_ivlset_t_addmasked
hx_ivlset_t_sub = _ida_hexrays.hx_ivlset_t_sub
hx_ivlset_t_sub_ = _ida_hexrays.hx_ivlset_t_sub_
hx_ivlset_t_has_common = _ida_hexrays.hx_ivlset_t_has_common
hx_ivlset_t_print = _ida_hexrays.hx_ivlset_t_print
hx_ivlset_t_dstr = _ida_hexrays.hx_ivlset_t_dstr
hx_ivlset_t_count = _ida_hexrays.hx_ivlset_t_count
hx_ivlset_t_has_common_ = _ida_hexrays.hx_ivlset_t_has_common_
hx_ivlset_t_contains = _ida_hexrays.hx_ivlset_t_contains
hx_ivlset_t_includes = _ida_hexrays.hx_ivlset_t_includes
hx_ivlset_t_intersect = _ida_hexrays.hx_ivlset_t_intersect
hx_ivlset_t_compare = _ida_hexrays.hx_ivlset_t_compare
# Register / memory list entries.
hx_get_mreg_name = _ida_hexrays.hx_get_mreg_name
hx_rlist_t_print = _ida_hexrays.hx_rlist_t_print
hx_rlist_t_dstr = _ida_hexrays.hx_rlist_t_dstr
hx_mlist_t_addmem = _ida_hexrays.hx_mlist_t_addmem
hx_mlist_t_print = _ida_hexrays.hx_mlist_t_print
hx_mlist_t_dstr = _ida_hexrays.hx_mlist_t_dstr
hx_mlist_t_compare = _ida_hexrays.hx_mlist_t_compare
# Variable-reference entries.
hx_lvar_ref_t_compare = _ida_hexrays.hx_lvar_ref_t_compare
hx_lvar_ref_t_var = _ida_hexrays.hx_lvar_ref_t_var
hx_stkvar_ref_t_compare = _ida_hexrays.hx_stkvar_ref_t_compare
hx_stkvar_ref_t_get_stkvar = _ida_hexrays.hx_stkvar_ref_t_get_stkvar
hx_fnumber_t_print = _ida_hexrays.hx_fnumber_t_print
hx_fnumber_t_dstr = _ida_hexrays.hx_fnumber_t_dstr
# mop_t (microcode operand) entries.
hx_mop_t_copy = _ida_hexrays.hx_mop_t_copy
hx_mop_t_assign = _ida_hexrays.hx_mop_t_assign
hx_mop_t_swap = _ida_hexrays.hx_mop_t_swap
hx_mop_t_erase = _ida_hexrays.hx_mop_t_erase
hx_mop_t_print = _ida_hexrays.hx_mop_t_print
hx_mop_t_dstr = _ida_hexrays.hx_mop_t_dstr
hx_mop_t_create_from_mlist = _ida_hexrays.hx_mop_t_create_from_mlist
hx_mop_t_create_from_ivlset = _ida_hexrays.hx_mop_t_create_from_ivlset
hx_mop_t_create_from_vdloc = _ida_hexrays.hx_mop_t_create_from_vdloc
# Thunk-table re-exports (continued): remaining mop_t entries, call-info
# (mcallarg_t/mcallinfo_t), switch cases (mcases_t), intervals (vivl_t),
# use-def chains (chain_t/block_chains_t/graph_chains_t) and microcode
# instruction (minsn_t) entries.
hx_mop_t_create_from_scattered_vdloc = _ida_hexrays.hx_mop_t_create_from_scattered_vdloc
hx_mop_t_create_from_insn = _ida_hexrays.hx_mop_t_create_from_insn
hx_mop_t_make_number = _ida_hexrays.hx_mop_t_make_number
hx_mop_t_make_fpnum = _ida_hexrays.hx_mop_t_make_fpnum
hx_mop_t_make_reg_pair = _ida_hexrays.hx_mop_t_make_reg_pair
hx_mop_t_make_helper = _ida_hexrays.hx_mop_t_make_helper
hx_mop_t_is_bit_reg = _ida_hexrays.hx_mop_t_is_bit_reg
hx_mop_t_may_use_aliased_memory = _ida_hexrays.hx_mop_t_may_use_aliased_memory
hx_mop_t_is01 = _ida_hexrays.hx_mop_t_is01
hx_mop_t_is_sign_extended_from = _ida_hexrays.hx_mop_t_is_sign_extended_from
hx_mop_t_is_zero_extended_from = _ida_hexrays.hx_mop_t_is_zero_extended_from
hx_mop_t_equal_mops = _ida_hexrays.hx_mop_t_equal_mops
hx_mop_t_lexcompare = _ida_hexrays.hx_mop_t_lexcompare
hx_mop_t_for_all_ops = _ida_hexrays.hx_mop_t_for_all_ops
hx_mop_t_for_all_scattered_submops = _ida_hexrays.hx_mop_t_for_all_scattered_submops
hx_mop_t_is_constant = _ida_hexrays.hx_mop_t_is_constant
hx_mop_t_get_stkoff = _ida_hexrays.hx_mop_t_get_stkoff
hx_mop_t_make_low_half = _ida_hexrays.hx_mop_t_make_low_half
hx_mop_t_make_high_half = _ida_hexrays.hx_mop_t_make_high_half
hx_mop_t_make_first_half = _ida_hexrays.hx_mop_t_make_first_half
hx_mop_t_make_second_half = _ida_hexrays.hx_mop_t_make_second_half
hx_mop_t_shift_mop = _ida_hexrays.hx_mop_t_shift_mop
hx_mop_t_change_size = _ida_hexrays.hx_mop_t_change_size
hx_mop_t_preserve_side_effects = _ida_hexrays.hx_mop_t_preserve_side_effects
hx_mop_t_apply_ld_mcode = _ida_hexrays.hx_mop_t_apply_ld_mcode
# mcallarg_t / mcallinfo_t (call information) entries.
hx_mcallarg_t_print = _ida_hexrays.hx_mcallarg_t_print
hx_mcallarg_t_dstr = _ida_hexrays.hx_mcallarg_t_dstr
hx_mcallarg_t_set_regarg = _ida_hexrays.hx_mcallarg_t_set_regarg
hx_mcallinfo_t_lexcompare = _ida_hexrays.hx_mcallinfo_t_lexcompare
hx_mcallinfo_t_set_type = _ida_hexrays.hx_mcallinfo_t_set_type
hx_mcallinfo_t_get_type = _ida_hexrays.hx_mcallinfo_t_get_type
hx_mcallinfo_t_print = _ida_hexrays.hx_mcallinfo_t_print
hx_mcallinfo_t_dstr = _ida_hexrays.hx_mcallinfo_t_dstr
# mcases_t (switch cases) entries.
hx_mcases_t_compare = _ida_hexrays.hx_mcases_t_compare
hx_mcases_t_print = _ida_hexrays.hx_mcases_t_print
hx_mcases_t_dstr = _ida_hexrays.hx_mcases_t_dstr
# vivl_t entries.
hx_vivl_t_extend_to_cover = _ida_hexrays.hx_vivl_t_extend_to_cover
hx_vivl_t_intersect = _ida_hexrays.hx_vivl_t_intersect
hx_vivl_t_print = _ida_hexrays.hx_vivl_t_print
hx_vivl_t_dstr = _ida_hexrays.hx_vivl_t_dstr
# chain_t / block_chains_t / graph_chains_t entries.
hx_chain_t_print = _ida_hexrays.hx_chain_t_print
hx_chain_t_dstr = _ida_hexrays.hx_chain_t_dstr
hx_chain_t_append_list = _ida_hexrays.hx_chain_t_append_list
hx_block_chains_t_get_chain = _ida_hexrays.hx_block_chains_t_get_chain
hx_block_chains_t_print = _ida_hexrays.hx_block_chains_t_print
hx_block_chains_t_dstr = _ida_hexrays.hx_block_chains_t_dstr
hx_graph_chains_t_for_all_chains = _ida_hexrays.hx_graph_chains_t_for_all_chains
hx_graph_chains_t_release = _ida_hexrays.hx_graph_chains_t_release
# minsn_t (microcode instruction) entries.
hx_minsn_t_init = _ida_hexrays.hx_minsn_t_init
hx_minsn_t_copy = _ida_hexrays.hx_minsn_t_copy
hx_minsn_t_swap = _ida_hexrays.hx_minsn_t_swap
hx_minsn_t_print = _ida_hexrays.hx_minsn_t_print
hx_minsn_t_dstr = _ida_hexrays.hx_minsn_t_dstr
hx_minsn_t_setaddr = _ida_hexrays.hx_minsn_t_setaddr
hx_minsn_t_optimize_subtree = _ida_hexrays.hx_minsn_t_optimize_subtree
hx_minsn_t_for_all_ops = _ida_hexrays.hx_minsn_t_for_all_ops
hx_minsn_t_for_all_insns = _ida_hexrays.hx_minsn_t_for_all_insns
hx_minsn_t__make_nop = _ida_hexrays.hx_minsn_t__make_nop
hx_minsn_t_equal_insns = _ida_hexrays.hx_minsn_t_equal_insns
hx_minsn_t_lexcompare = _ida_hexrays.hx_minsn_t_lexcompare
hx_minsn_t_is_noret_call = _ida_hexrays.hx_minsn_t_is_noret_call
hx_minsn_t_is_helper = _ida_hexrays.hx_minsn_t_is_helper
hx_minsn_t_find_call = _ida_hexrays.hx_minsn_t_find_call
hx_minsn_t_has_side_effects = _ida_hexrays.hx_minsn_t_has_side_effects
hx_minsn_t_find_opcode = _ida_hexrays.hx_minsn_t_find_opcode
hx_minsn_t_find_ins_op = _ida_hexrays.hx_minsn_t_find_ins_op
# Thunk-table re-exports (continued): remaining minsn_t entries and
# mblock_t (microcode basic block) entries.
hx_minsn_t_find_num_op = _ida_hexrays.hx_minsn_t_find_num_op
# NOTE: "modifes" (sic) is the upstream generated name -- do not "fix" it.
hx_minsn_t_modifes_d = _ida_hexrays.hx_minsn_t_modifes_d
hx_minsn_t_is_between = _ida_hexrays.hx_minsn_t_is_between
hx_minsn_t_may_use_aliased_memory = _ida_hexrays.hx_minsn_t_may_use_aliased_memory
hx_getf_reginsn = _ida_hexrays.hx_getf_reginsn
hx_getb_reginsn = _ida_hexrays.hx_getb_reginsn
# mblock_t entries.
hx_mblock_t_init = _ida_hexrays.hx_mblock_t_init
hx_mblock_t_print = _ida_hexrays.hx_mblock_t_print
hx_mblock_t_dump = _ida_hexrays.hx_mblock_t_dump
hx_mblock_t_vdump_block = _ida_hexrays.hx_mblock_t_vdump_block
hx_mblock_t_insert_into_block = _ida_hexrays.hx_mblock_t_insert_into_block
hx_mblock_t_remove_from_block = _ida_hexrays.hx_mblock_t_remove_from_block
hx_mblock_t_for_all_insns = _ida_hexrays.hx_mblock_t_for_all_insns
hx_mblock_t_for_all_ops = _ida_hexrays.hx_mblock_t_for_all_ops
hx_mblock_t_for_all_uses = _ida_hexrays.hx_mblock_t_for_all_uses
hx_mblock_t_optimize_insn = _ida_hexrays.hx_mblock_t_optimize_insn
hx_mblock_t_optimize_block = _ida_hexrays.hx_mblock_t_optimize_block
hx_mblock_t_build_lists = _ida_hexrays.hx_mblock_t_build_lists
hx_mblock_t_append_use_list = _ida_hexrays.hx_mblock_t_append_use_list
hx_mblock_t_append_def_list = _ida_hexrays.hx_mblock_t_append_def_list
hx_mblock_t_build_use_list = _ida_hexrays.hx_mblock_t_build_use_list
hx_mblock_t_build_def_list = _ida_hexrays.hx_mblock_t_build_def_list
hx_mblock_t_find_first_use = _ida_hexrays.hx_mblock_t_find_first_use
hx_mblock_t_find_redefinition = _ida_hexrays.hx_mblock_t_find_redefinition
hx_mblock_t_is_rhs_redefined = _ida_hexrays.hx_mblock_t_is_rhs_redefined
hx_mblock_t_find_access = _ida_hexrays.hx_mblock_t_find_access
hx_mblock_t_get_valranges = _ida_hexrays.hx_mblock_t_get_valranges
# mbl_array_t (microcode array) entries, first part.
hx_mbl_array_t_idaloc2vd = _ida_hexrays.hx_mbl_array_t_idaloc2vd
hx_mbl_array_t_vd2idaloc = _ida_hexrays.hx_mbl_array_t_vd2idaloc
hx_mbl_array_t_term = _ida_hexrays.hx_mbl_array_t_term
hx_mbl_array_t_optimize_local = _ida_hexrays.hx_mbl_array_t_optimize_local
# Thunk-table re-exports (continued): remaining mbl_array_t entries,
# mbl_graph_t / codegen_t, top-level UI entries, and the ctree
# (citem_t / cexpr_t / cinsn_t / c*_t statement) entries.
hx_mbl_array_t_build_graph = _ida_hexrays.hx_mbl_array_t_build_graph
hx_mbl_array_t_get_graph = _ida_hexrays.hx_mbl_array_t_get_graph
hx_mbl_array_t_analyze_calls = _ida_hexrays.hx_mbl_array_t_analyze_calls
hx_mbl_array_t_optimize_global = _ida_hexrays.hx_mbl_array_t_optimize_global
hx_mbl_array_t_alloc_lvars = _ida_hexrays.hx_mbl_array_t_alloc_lvars
hx_mbl_array_t_dump = _ida_hexrays.hx_mbl_array_t_dump
hx_mbl_array_t_vdump_mba = _ida_hexrays.hx_mbl_array_t_vdump_mba
hx_mbl_array_t_print = _ida_hexrays.hx_mbl_array_t_print
hx_mbl_array_t_verify = _ida_hexrays.hx_mbl_array_t_verify
hx_mbl_array_t_mark_chains_dirty = _ida_hexrays.hx_mbl_array_t_mark_chains_dirty
hx_mbl_array_t_insert_block = _ida_hexrays.hx_mbl_array_t_insert_block
hx_mbl_array_t_remove_block = _ida_hexrays.hx_mbl_array_t_remove_block
hx_mbl_array_t_remove_empty_blocks = _ida_hexrays.hx_mbl_array_t_remove_empty_blocks
hx_mbl_array_t_combine_blocks = _ida_hexrays.hx_mbl_array_t_combine_blocks
hx_mbl_array_t_for_all_ops = _ida_hexrays.hx_mbl_array_t_for_all_ops
hx_mbl_array_t_for_all_insns = _ida_hexrays.hx_mbl_array_t_for_all_insns
hx_mbl_array_t_for_all_topinsns = _ida_hexrays.hx_mbl_array_t_for_all_topinsns
hx_mbl_array_t_find_mop = _ida_hexrays.hx_mbl_array_t_find_mop
hx_mbl_array_t_arg = _ida_hexrays.hx_mbl_array_t_arg
hx_mbl_array_t_serialize = _ida_hexrays.hx_mbl_array_t_serialize
hx_mbl_array_t_deserialize = _ida_hexrays.hx_mbl_array_t_deserialize
# mbl_graph_t entries.
hx_mbl_graph_t_is_accessed_globally = _ida_hexrays.hx_mbl_graph_t_is_accessed_globally
hx_mbl_graph_t_get_ud = _ida_hexrays.hx_mbl_graph_t_get_ud
hx_mbl_graph_t_get_du = _ida_hexrays.hx_mbl_graph_t_get_du
# codegen_t entries.
hx_codegen_t_emit = _ida_hexrays.hx_codegen_t_emit
hx_codegen_t_emit_ = _ida_hexrays.hx_codegen_t_emit_
# Top-level decompiler / UI entries.
hx_is_kreg = _ida_hexrays.hx_is_kreg
hx_get_temp_regs = _ida_hexrays.hx_get_temp_regs
hx_get_hexrays_version = _ida_hexrays.hx_get_hexrays_version
hx_open_pseudocode = _ida_hexrays.hx_open_pseudocode
hx_close_pseudocode = _ida_hexrays.hx_close_pseudocode
hx_get_widget_vdui = _ida_hexrays.hx_get_widget_vdui
hx_decompile_many = _ida_hexrays.hx_decompile_many
hx_hexrays_failure_t_desc = _ida_hexrays.hx_hexrays_failure_t_desc
hx_send_database = _ida_hexrays.hx_send_database
hx_gco_info_t_append_to_list = _ida_hexrays.hx_gco_info_t_append_to_list
hx_get_current_operand = _ida_hexrays.hx_get_current_operand
hx_remitem = _ida_hexrays.hx_remitem
hx_negated_relation = _ida_hexrays.hx_negated_relation
hx_swapped_relation = _ida_hexrays.hx_swapped_relation
hx_get_op_signness = _ida_hexrays.hx_get_op_signness
hx_asgop = _ida_hexrays.hx_asgop
hx_asgop_revert = _ida_hexrays.hx_asgop_revert
# cnumber_t entries.
hx_cnumber_t_print = _ida_hexrays.hx_cnumber_t_print
hx_cnumber_t_value = _ida_hexrays.hx_cnumber_t_value
hx_cnumber_t_assign = _ida_hexrays.hx_cnumber_t_assign
hx_cnumber_t_compare = _ida_hexrays.hx_cnumber_t_compare
hx_var_ref_t_compare = _ida_hexrays.hx_var_ref_t_compare
# ctree visitor entries.
hx_ctree_visitor_t_apply_to = _ida_hexrays.hx_ctree_visitor_t_apply_to
hx_ctree_visitor_t_apply_to_exprs = _ida_hexrays.hx_ctree_visitor_t_apply_to_exprs
hx_ctree_parentee_t_recalc_parent_types = _ida_hexrays.hx_ctree_parentee_t_recalc_parent_types
hx_cfunc_parentee_t_calc_rvalue_type = _ida_hexrays.hx_cfunc_parentee_t_calc_rvalue_type
# citem_t (ctree item) entries.
hx_citem_locator_t_compare = _ida_hexrays.hx_citem_locator_t_compare
hx_citem_t_contains_expr = _ida_hexrays.hx_citem_t_contains_expr
hx_citem_t_contains_label = _ida_hexrays.hx_citem_t_contains_label
hx_citem_t_find_parent_of = _ida_hexrays.hx_citem_t_find_parent_of
hx_citem_t_find_closest_addr = _ida_hexrays.hx_citem_t_find_closest_addr
# cexpr_t (ctree expression) entries.
hx_cexpr_t_assign = _ida_hexrays.hx_cexpr_t_assign
hx_cexpr_t_compare = _ida_hexrays.hx_cexpr_t_compare
hx_cexpr_t_replace_by = _ida_hexrays.hx_cexpr_t_replace_by
hx_cexpr_t_cleanup = _ida_hexrays.hx_cexpr_t_cleanup
hx_cexpr_t_put_number = _ida_hexrays.hx_cexpr_t_put_number
hx_cexpr_t_print1 = _ida_hexrays.hx_cexpr_t_print1
hx_cexpr_t_calc_type = _ida_hexrays.hx_cexpr_t_calc_type
hx_cexpr_t_equal_effect = _ida_hexrays.hx_cexpr_t_equal_effect
hx_cexpr_t_is_child_of = _ida_hexrays.hx_cexpr_t_is_child_of
hx_cexpr_t_contains_operator = _ida_hexrays.hx_cexpr_t_contains_operator
hx_cexpr_t_get_high_nbit_bound = _ida_hexrays.hx_cexpr_t_get_high_nbit_bound
hx_cexpr_t_get_low_nbit_bound = _ida_hexrays.hx_cexpr_t_get_low_nbit_bound
hx_cexpr_t_requires_lvalue = _ida_hexrays.hx_cexpr_t_requires_lvalue
hx_cexpr_t_has_side_effects = _ida_hexrays.hx_cexpr_t_has_side_effects
# ctree statement entries (if/loop/for/while/do/return/goto/asm).
hx_cif_t_assign = _ida_hexrays.hx_cif_t_assign
hx_cif_t_compare = _ida_hexrays.hx_cif_t_compare
hx_cloop_t_assign = _ida_hexrays.hx_cloop_t_assign
hx_cfor_t_compare = _ida_hexrays.hx_cfor_t_compare
hx_cwhile_t_compare = _ida_hexrays.hx_cwhile_t_compare
hx_cdo_t_compare = _ida_hexrays.hx_cdo_t_compare
hx_creturn_t_compare = _ida_hexrays.hx_creturn_t_compare
hx_cgoto_t_compare = _ida_hexrays.hx_cgoto_t_compare
hx_casm_t_compare = _ida_hexrays.hx_casm_t_compare
# cinsn_t (ctree instruction) entries.
hx_cinsn_t_assign = _ida_hexrays.hx_cinsn_t_assign
hx_cinsn_t_compare = _ida_hexrays.hx_cinsn_t_compare
hx_cinsn_t_replace_by = _ida_hexrays.hx_cinsn_t_replace_by
hx_cinsn_t_cleanup = _ida_hexrays.hx_cinsn_t_cleanup
hx_cinsn_t_new_insn = _ida_hexrays.hx_cinsn_t_new_insn
hx_cinsn_t_create_if = _ida_hexrays.hx_cinsn_t_create_if
hx_cinsn_t_print = _ida_hexrays.hx_cinsn_t_print
hx_cinsn_t_print1 = _ida_hexrays.hx_cinsn_t_print1
hx_cinsn_t_is_ordinary_flow = _ida_hexrays.hx_cinsn_t_is_ordinary_flow
hx_cinsn_t_contains_insn = _ida_hexrays.hx_cinsn_t_contains_insn
hx_cinsn_t_collect_free_breaks = _ida_hexrays.hx_cinsn_t_collect_free_breaks
hx_cinsn_t_collect_free_continues = _ida_hexrays.hx_cinsn_t_collect_free_continues
# Compound / switch entries.
hx_cblock_t_compare = _ida_hexrays.hx_cblock_t_compare
hx_carglist_t_compare = _ida_hexrays.hx_carglist_t_compare
hx_ccase_t_compare = _ida_hexrays.hx_ccase_t_compare
hx_ccases_t_compare = _ida_hexrays.hx_ccases_t_compare
hx_cswitch_t_compare = _ida_hexrays.hx_cswitch_t_compare
hx_ctree_item_t_get_memptr = _ida_hexrays.hx_ctree_item_t_get_memptr hx_ctree_item_t_get_lvar = _ida_hexrays.hx_ctree_item_t_get_lvar hx_ctree_item_t_get_ea = _ida_hexrays.hx_ctree_item_t_get_ea hx_ctree_item_t_get_label_num = _ida_hexrays.hx_ctree_item_t_get_label_num hx_lnot = _ida_hexrays.hx_lnot hx_new_block = _ida_hexrays.hx_new_block hx_vcreate_helper = _ida_hexrays.hx_vcreate_helper hx_vcall_helper = _ida_hexrays.hx_vcall_helper hx_make_num = _ida_hexrays.hx_make_num hx_make_ref = _ida_hexrays.hx_make_ref hx_dereference = _ida_hexrays.hx_dereference hx_save_user_labels = _ida_hexrays.hx_save_user_labels hx_save_user_cmts = _ida_hexrays.hx_save_user_cmts hx_save_user_numforms = _ida_hexrays.hx_save_user_numforms hx_save_user_iflags = _ida_hexrays.hx_save_user_iflags hx_save_user_unions = _ida_hexrays.hx_save_user_unions hx_restore_user_labels = _ida_hexrays.hx_restore_user_labels hx_restore_user_cmts = _ida_hexrays.hx_restore_user_cmts hx_restore_user_numforms = _ida_hexrays.hx_restore_user_numforms hx_restore_user_iflags = _ida_hexrays.hx_restore_user_iflags hx_restore_user_unions = _ida_hexrays.hx_restore_user_unions hx_cfunc_t_build_c_tree = _ida_hexrays.hx_cfunc_t_build_c_tree hx_cfunc_t_verify = _ida_hexrays.hx_cfunc_t_verify hx_cfunc_t_print_dcl = _ida_hexrays.hx_cfunc_t_print_dcl hx_cfunc_t_print_func = _ida_hexrays.hx_cfunc_t_print_func hx_cfunc_t_get_func_type = _ida_hexrays.hx_cfunc_t_get_func_type hx_cfunc_t_get_lvars = _ida_hexrays.hx_cfunc_t_get_lvars hx_cfunc_t_get_stkoff_delta = _ida_hexrays.hx_cfunc_t_get_stkoff_delta hx_cfunc_t_find_label = _ida_hexrays.hx_cfunc_t_find_label hx_cfunc_t_remove_unused_labels = _ida_hexrays.hx_cfunc_t_remove_unused_labels hx_cfunc_t_get_user_cmt = _ida_hexrays.hx_cfunc_t_get_user_cmt hx_cfunc_t_set_user_cmt = _ida_hexrays.hx_cfunc_t_set_user_cmt hx_cfunc_t_get_user_iflags = _ida_hexrays.hx_cfunc_t_get_user_iflags hx_cfunc_t_set_user_iflags = _ida_hexrays.hx_cfunc_t_set_user_iflags hx_cfunc_t_has_orphan_cmts = 
_ida_hexrays.hx_cfunc_t_has_orphan_cmts hx_cfunc_t_del_orphan_cmts = _ida_hexrays.hx_cfunc_t_del_orphan_cmts hx_cfunc_t_get_user_union_selection = _ida_hexrays.hx_cfunc_t_get_user_union_selection hx_cfunc_t_set_user_union_selection = _ida_hexrays.hx_cfunc_t_set_user_union_selection hx_cfunc_t_get_line_item = _ida_hexrays.hx_cfunc_t_get_line_item hx_cfunc_t_get_warnings = _ida_hexrays.hx_cfunc_t_get_warnings hx_cfunc_t_get_eamap = _ida_hexrays.hx_cfunc_t_get_eamap hx_cfunc_t_get_boundaries = _ida_hexrays.hx_cfunc_t_get_boundaries hx_cfunc_t_get_pseudocode = _ida_hexrays.hx_cfunc_t_get_pseudocode hx_cfunc_t_gather_derefs = _ida_hexrays.hx_cfunc_t_gather_derefs hx_cfunc_t_find_item_coords = _ida_hexrays.hx_cfunc_t_find_item_coords hx_cfunc_t_cleanup = _ida_hexrays.hx_cfunc_t_cleanup hx_decompile = _ida_hexrays.hx_decompile hx_gen_microcode = _ida_hexrays.hx_gen_microcode hx_mark_cfunc_dirty = _ida_hexrays.hx_mark_cfunc_dirty hx_clear_cached_cfuncs = _ida_hexrays.hx_clear_cached_cfuncs hx_has_cached_cfunc = _ida_hexrays.hx_has_cached_cfunc hx_get_ctype_name = _ida_hexrays.hx_get_ctype_name hx_create_field_name = _ida_hexrays.hx_create_field_name hx_install_hexrays_callback = _ida_hexrays.hx_install_hexrays_callback hx_remove_hexrays_callback = _ida_hexrays.hx_remove_hexrays_callback hx_vdui_t_set_locked = _ida_hexrays.hx_vdui_t_set_locked hx_vdui_t_refresh_view = _ida_hexrays.hx_vdui_t_refresh_view hx_vdui_t_refresh_ctext = _ida_hexrays.hx_vdui_t_refresh_ctext hx_vdui_t_switch_to = _ida_hexrays.hx_vdui_t_switch_to hx_vdui_t_get_number = _ida_hexrays.hx_vdui_t_get_number hx_vdui_t_get_current_label = _ida_hexrays.hx_vdui_t_get_current_label hx_vdui_t_clear = _ida_hexrays.hx_vdui_t_clear hx_vdui_t_refresh_cpos = _ida_hexrays.hx_vdui_t_refresh_cpos hx_vdui_t_get_current_item = _ida_hexrays.hx_vdui_t_get_current_item hx_vdui_t_ui_rename_lvar = _ida_hexrays.hx_vdui_t_ui_rename_lvar hx_vdui_t_rename_lvar = _ida_hexrays.hx_vdui_t_rename_lvar hx_vdui_t_ui_set_call_type = 
_ida_hexrays.hx_vdui_t_ui_set_call_type hx_vdui_t_ui_set_lvar_type = _ida_hexrays.hx_vdui_t_ui_set_lvar_type hx_vdui_t_set_lvar_type = _ida_hexrays.hx_vdui_t_set_lvar_type hx_vdui_t_ui_edit_lvar_cmt = _ida_hexrays.hx_vdui_t_ui_edit_lvar_cmt hx_vdui_t_set_lvar_cmt = _ida_hexrays.hx_vdui_t_set_lvar_cmt hx_vdui_t_ui_map_lvar = _ida_hexrays.hx_vdui_t_ui_map_lvar hx_vdui_t_ui_unmap_lvar = _ida_hexrays.hx_vdui_t_ui_unmap_lvar hx_vdui_t_map_lvar = _ida_hexrays.hx_vdui_t_map_lvar hx_vdui_t_set_strmem_type = _ida_hexrays.hx_vdui_t_set_strmem_type hx_vdui_t_rename_strmem = _ida_hexrays.hx_vdui_t_rename_strmem hx_vdui_t_set_global_type = _ida_hexrays.hx_vdui_t_set_global_type hx_vdui_t_rename_global = _ida_hexrays.hx_vdui_t_rename_global hx_vdui_t_rename_label = _ida_hexrays.hx_vdui_t_rename_label hx_vdui_t_jump_enter = _ida_hexrays.hx_vdui_t_jump_enter hx_vdui_t_ctree_to_disasm = _ida_hexrays.hx_vdui_t_ctree_to_disasm hx_vdui_t_calc_cmt_type = _ida_hexrays.hx_vdui_t_calc_cmt_type hx_vdui_t_edit_cmt = _ida_hexrays.hx_vdui_t_edit_cmt hx_vdui_t_edit_func_cmt = _ida_hexrays.hx_vdui_t_edit_func_cmt hx_vdui_t_del_orphan_cmts = _ida_hexrays.hx_vdui_t_del_orphan_cmts hx_vdui_t_set_num_radix = _ida_hexrays.hx_vdui_t_set_num_radix hx_vdui_t_set_num_enum = _ida_hexrays.hx_vdui_t_set_num_enum hx_vdui_t_set_num_stroff = _ida_hexrays.hx_vdui_t_set_num_stroff hx_vdui_t_invert_sign = _ida_hexrays.hx_vdui_t_invert_sign hx_vdui_t_invert_bits = _ida_hexrays.hx_vdui_t_invert_bits hx_vdui_t_collapse_item = _ida_hexrays.hx_vdui_t_collapse_item hx_vdui_t_collapse_lvars = _ida_hexrays.hx_vdui_t_collapse_lvars hx_vdui_t_split_item = _ida_hexrays.hx_vdui_t_split_item hx_hexrays_alloc = _ida_hexrays.hx_hexrays_alloc hx_hexrays_free = _ida_hexrays.hx_hexrays_free hx_vdui_t_set_noptr_lvar = _ida_hexrays.hx_vdui_t_set_noptr_lvar hx_select_udt_by_offset = _ida_hexrays.hx_select_udt_by_offset hx_mblock_t_get_valranges_ = _ida_hexrays.hx_mblock_t_get_valranges_ hx_cfunc_t_refresh_func_ctext = 
_ida_hexrays.hx_cfunc_t_refresh_func_ctext hx_checkout_hexrays_license = _ida_hexrays.hx_checkout_hexrays_license hx_mbl_array_t_copy_block = _ida_hexrays.hx_mbl_array_t_copy_block hx_mblock_t_optimize_useless_jump = _ida_hexrays.hx_mblock_t_optimize_useless_jump hx_mblock_t_get_reginsn_qty = _ida_hexrays.hx_mblock_t_get_reginsn_qty user_numforms_iterator_t_swigregister = _ida_hexrays.user_numforms_iterator_t_swigregister user_numforms_iterator_t_swigregister(user_numforms_iterator_t) def user_numforms_begin(*args): """ user_numforms_begin(map) -> user_numforms_iterator_t Get iterator pointing to the beginning of user_numforms_t. @param map (C++: const user_numforms_t *) """ return _ida_hexrays.user_numforms_begin(*args) def user_numforms_end(*args): """ user_numforms_end(map) -> user_numforms_iterator_t Get iterator pointing to the end of user_numforms_t. @param map (C++: const user_numforms_t *) """ return _ida_hexrays.user_numforms_end(*args) def user_numforms_next(*args): """ user_numforms_next(p) -> user_numforms_iterator_t Move to the next element. @param p (C++: user_numforms_iterator_t) """ return _ida_hexrays.user_numforms_next(*args) def user_numforms_prev(*args): """ user_numforms_prev(p) -> user_numforms_iterator_t Move to the previous element. @param p (C++: user_numforms_iterator_t) """ return _ida_hexrays.user_numforms_prev(*args) def user_numforms_first(*args): """ user_numforms_first(p) -> operand_locator_t Get reference to the current map key. @param p (C++: user_numforms_iterator_t) """ return _ida_hexrays.user_numforms_first(*args) def user_numforms_second(*args): """ user_numforms_second(p) -> number_format_t Get reference to the current map value. @param p (C++: user_numforms_iterator_t) """ return _ida_hexrays.user_numforms_second(*args) def user_numforms_find(*args): """ user_numforms_find(map, key) -> user_numforms_iterator_t Find the specified key in user_numforms_t. 
@param map (C++: const user_numforms_t *) @param key (C++: const operand_locator_t &) """ return _ida_hexrays.user_numforms_find(*args) def user_numforms_insert(*args): """ user_numforms_insert(map, key, val) -> user_numforms_iterator_t Insert new ( 'operand_locator_t' , 'number_format_t' ) pair into user_numforms_t. @param map (C++: user_numforms_t *) @param key (C++: const operand_locator_t &) @param val (C++: const number_format_t &) """ return _ida_hexrays.user_numforms_insert(*args) def user_numforms_erase(*args): """ user_numforms_erase(map, p) Erase current element from user_numforms_t. @param map (C++: user_numforms_t *) @param p (C++: user_numforms_iterator_t) """ return _ida_hexrays.user_numforms_erase(*args) def user_numforms_clear(*args): """ user_numforms_clear(map) Clear user_numforms_t. @param map (C++: user_numforms_t *) """ return _ida_hexrays.user_numforms_clear(*args) def user_numforms_size(*args): """ user_numforms_size(map) -> size_t Get size of user_numforms_t. @param map (C++: user_numforms_t *) """ return _ida_hexrays.user_numforms_size(*args) def user_numforms_free(*args): """ user_numforms_free(map) Delete user_numforms_t instance. @param map (C++: user_numforms_t *) """ return _ida_hexrays.user_numforms_free(*args) def user_numforms_new(*args): """ user_numforms_new() -> user_numforms_t Create a new user_numforms_t instance. """ return _ida_hexrays.user_numforms_new(*args) lvar_mapping_iterator_t_swigregister = _ida_hexrays.lvar_mapping_iterator_t_swigregister lvar_mapping_iterator_t_swigregister(lvar_mapping_iterator_t) def lvar_mapping_begin(*args): """ lvar_mapping_begin(map) -> lvar_mapping_iterator_t Get iterator pointing to the beginning of lvar_mapping_t. @param map (C++: const lvar_mapping_t *) """ return _ida_hexrays.lvar_mapping_begin(*args) def lvar_mapping_end(*args): """ lvar_mapping_end(map) -> lvar_mapping_iterator_t Get iterator pointing to the end of lvar_mapping_t. 
@param map (C++: const lvar_mapping_t *) """ return _ida_hexrays.lvar_mapping_end(*args) def lvar_mapping_next(*args): """ lvar_mapping_next(p) -> lvar_mapping_iterator_t Move to the next element. @param p (C++: lvar_mapping_iterator_t) """ return _ida_hexrays.lvar_mapping_next(*args) def lvar_mapping_prev(*args): """ lvar_mapping_prev(p) -> lvar_mapping_iterator_t Move to the previous element. @param p (C++: lvar_mapping_iterator_t) """ return _ida_hexrays.lvar_mapping_prev(*args) def lvar_mapping_first(*args): """ lvar_mapping_first(p) -> lvar_locator_t Get reference to the current map key. @param p (C++: lvar_mapping_iterator_t) """ return _ida_hexrays.lvar_mapping_first(*args) def lvar_mapping_second(*args): """ lvar_mapping_second(p) -> lvar_locator_t Get reference to the current map value. @param p (C++: lvar_mapping_iterator_t) """ return _ida_hexrays.lvar_mapping_second(*args) def lvar_mapping_find(*args): """ lvar_mapping_find(map, key) -> lvar_mapping_iterator_t Find the specified key in lvar_mapping_t. @param map (C++: const lvar_mapping_t *) @param key (C++: const lvar_locator_t &) """ return _ida_hexrays.lvar_mapping_find(*args) def lvar_mapping_insert(*args): """ lvar_mapping_insert(map, key, val) -> lvar_mapping_iterator_t Insert new ( 'lvar_locator_t' , 'lvar_locator_t' ) pair into lvar_mapping_t. @param map (C++: lvar_mapping_t *) @param key (C++: const lvar_locator_t &) @param val (C++: const lvar_locator_t &) """ return _ida_hexrays.lvar_mapping_insert(*args) def lvar_mapping_erase(*args): """ lvar_mapping_erase(map, p) Erase current element from lvar_mapping_t. @param map (C++: lvar_mapping_t *) @param p (C++: lvar_mapping_iterator_t) """ return _ida_hexrays.lvar_mapping_erase(*args) def lvar_mapping_clear(*args): """ lvar_mapping_clear(map) Clear lvar_mapping_t. @param map (C++: lvar_mapping_t *) """ return _ida_hexrays.lvar_mapping_clear(*args) def lvar_mapping_size(*args): """ lvar_mapping_size(map) -> size_t Get size of lvar_mapping_t. 
@param map (C++: lvar_mapping_t *) """ return _ida_hexrays.lvar_mapping_size(*args) def lvar_mapping_free(*args): """ lvar_mapping_free(map) Delete lvar_mapping_t instance. @param map (C++: lvar_mapping_t *) """ return _ida_hexrays.lvar_mapping_free(*args) def lvar_mapping_new(*args): """ lvar_mapping_new() -> lvar_mapping_t Create a new lvar_mapping_t instance. """ return _ida_hexrays.lvar_mapping_new(*args) udcall_map_iterator_t_swigregister = _ida_hexrays.udcall_map_iterator_t_swigregister udcall_map_iterator_t_swigregister(udcall_map_iterator_t) def udcall_map_begin(*args): """ udcall_map_begin(map) -> udcall_map_iterator_t Get iterator pointing to the beginning of udcall_map_t. @param map (C++: const udcall_map_t *) """ return _ida_hexrays.udcall_map_begin(*args) def udcall_map_end(*args): """ udcall_map_end(map) -> udcall_map_iterator_t Get iterator pointing to the end of udcall_map_t. @param map (C++: const udcall_map_t *) """ return _ida_hexrays.udcall_map_end(*args) def udcall_map_next(*args): """ udcall_map_next(p) -> udcall_map_iterator_t Move to the next element. @param p (C++: udcall_map_iterator_t) """ return _ida_hexrays.udcall_map_next(*args) def udcall_map_prev(*args): """ udcall_map_prev(p) -> udcall_map_iterator_t Move to the previous element. @param p (C++: udcall_map_iterator_t) """ return _ida_hexrays.udcall_map_prev(*args) def udcall_map_first(*args): """ udcall_map_first(p) -> ea_t const & Get reference to the current map key. @param p (C++: udcall_map_iterator_t) """ return _ida_hexrays.udcall_map_first(*args) def udcall_map_second(*args): """ udcall_map_second(p) -> udcall_t Get reference to the current map value. @param p (C++: udcall_map_iterator_t) """ return _ida_hexrays.udcall_map_second(*args) def udcall_map_find(*args): """ udcall_map_find(map, key) -> udcall_map_iterator_t Find the specified key in udcall_map_t. 
@param map (C++: const udcall_map_t *) @param key (C++: const ea_t &) """ return _ida_hexrays.udcall_map_find(*args) def udcall_map_insert(*args): """ udcall_map_insert(map, key, val) -> udcall_map_iterator_t Insert new (ea_t, 'udcall_t' ) pair into udcall_map_t. @param map (C++: udcall_map_t *) @param key (C++: const ea_t &) @param val (C++: const udcall_t &) """ return _ida_hexrays.udcall_map_insert(*args) def udcall_map_erase(*args): """ udcall_map_erase(map, p) Erase current element from udcall_map_t. @param map (C++: udcall_map_t *) @param p (C++: udcall_map_iterator_t) """ return _ida_hexrays.udcall_map_erase(*args) def udcall_map_clear(*args): """ udcall_map_clear(map) Clear udcall_map_t. @param map (C++: udcall_map_t *) """ return _ida_hexrays.udcall_map_clear(*args) def udcall_map_size(*args): """ udcall_map_size(map) -> size_t Get size of udcall_map_t. @param map (C++: udcall_map_t *) """ return _ida_hexrays.udcall_map_size(*args) def udcall_map_free(*args): """ udcall_map_free(map) Delete udcall_map_t instance. @param map (C++: udcall_map_t *) """ return _ida_hexrays.udcall_map_free(*args) def udcall_map_new(*args): """ udcall_map_new() -> udcall_map_t * Create a new udcall_map_t instance. """ return _ida_hexrays.udcall_map_new(*args) user_cmts_iterator_t_swigregister = _ida_hexrays.user_cmts_iterator_t_swigregister user_cmts_iterator_t_swigregister(user_cmts_iterator_t) def user_cmts_begin(*args): """ user_cmts_begin(map) -> user_cmts_iterator_t Get iterator pointing to the beginning of user_cmts_t. @param map (C++: const user_cmts_t *) """ return _ida_hexrays.user_cmts_begin(*args) def user_cmts_end(*args): """ user_cmts_end(map) -> user_cmts_iterator_t Get iterator pointing to the end of user_cmts_t. @param map (C++: const user_cmts_t *) """ return _ida_hexrays.user_cmts_end(*args) def user_cmts_next(*args): """ user_cmts_next(p) -> user_cmts_iterator_t Move to the next element. 
@param p (C++: user_cmts_iterator_t) """ return _ida_hexrays.user_cmts_next(*args) def user_cmts_prev(*args): """ user_cmts_prev(p) -> user_cmts_iterator_t Move to the previous element. @param p (C++: user_cmts_iterator_t) """ return _ida_hexrays.user_cmts_prev(*args) def user_cmts_first(*args): """ user_cmts_first(p) -> treeloc_t Get reference to the current map key. @param p (C++: user_cmts_iterator_t) """ return _ida_hexrays.user_cmts_first(*args) def user_cmts_second(*args): """ user_cmts_second(p) -> citem_cmt_t Get reference to the current map value. @param p (C++: user_cmts_iterator_t) """ return _ida_hexrays.user_cmts_second(*args) def user_cmts_find(*args): """ user_cmts_find(map, key) -> user_cmts_iterator_t Find the specified key in user_cmts_t. @param map (C++: const user_cmts_t *) @param key (C++: const treeloc_t &) """ return _ida_hexrays.user_cmts_find(*args) def user_cmts_insert(*args): """ user_cmts_insert(map, key, val) -> user_cmts_iterator_t Insert new ( 'treeloc_t' , 'citem_cmt_t' ) pair into user_cmts_t. @param map (C++: user_cmts_t *) @param key (C++: const treeloc_t &) @param val (C++: const citem_cmt_t &) """ return _ida_hexrays.user_cmts_insert(*args) def user_cmts_erase(*args): """ user_cmts_erase(map, p) Erase current element from user_cmts_t. @param map (C++: user_cmts_t *) @param p (C++: user_cmts_iterator_t) """ return _ida_hexrays.user_cmts_erase(*args) def user_cmts_clear(*args): """ user_cmts_clear(map) Clear user_cmts_t. @param map (C++: user_cmts_t *) """ return _ida_hexrays.user_cmts_clear(*args) def user_cmts_size(*args): """ user_cmts_size(map) -> size_t Get size of user_cmts_t. @param map (C++: user_cmts_t *) """ return _ida_hexrays.user_cmts_size(*args) def user_cmts_free(*args): """ user_cmts_free(map) Delete user_cmts_t instance. @param map (C++: user_cmts_t *) """ return _ida_hexrays.user_cmts_free(*args) def user_cmts_new(*args): """ user_cmts_new() -> user_cmts_t Create a new user_cmts_t instance. 
""" return _ida_hexrays.user_cmts_new(*args) user_iflags_iterator_t_swigregister = _ida_hexrays.user_iflags_iterator_t_swigregister user_iflags_iterator_t_swigregister(user_iflags_iterator_t) def user_iflags_begin(*args): """ user_iflags_begin(map) -> user_iflags_iterator_t Get iterator pointing to the beginning of user_iflags_t. @param map (C++: const user_iflags_t *) """ return _ida_hexrays.user_iflags_begin(*args) def user_iflags_end(*args): """ user_iflags_end(map) -> user_iflags_iterator_t Get iterator pointing to the end of user_iflags_t. @param map (C++: const user_iflags_t *) """ return _ida_hexrays.user_iflags_end(*args) def user_iflags_next(*args): """ user_iflags_next(p) -> user_iflags_iterator_t Move to the next element. @param p (C++: user_iflags_iterator_t) """ return _ida_hexrays.user_iflags_next(*args) def user_iflags_prev(*args): """ user_iflags_prev(p) -> user_iflags_iterator_t Move to the previous element. @param p (C++: user_iflags_iterator_t) """ return _ida_hexrays.user_iflags_prev(*args) def user_iflags_first(*args): """ user_iflags_first(p) -> citem_locator_t Get reference to the current map key. @param p (C++: user_iflags_iterator_t) """ return _ida_hexrays.user_iflags_first(*args) def user_iflags_find(*args): """ user_iflags_find(map, key) -> user_iflags_iterator_t Find the specified key in user_iflags_t. @param map (C++: const user_iflags_t *) @param key (C++: const citem_locator_t &) """ return _ida_hexrays.user_iflags_find(*args) def user_iflags_insert(*args): """ user_iflags_insert(map, key, val) -> user_iflags_iterator_t Insert new ( 'citem_locator_t' , int32) pair into user_iflags_t. @param map (C++: user_iflags_t *) @param key (C++: const citem_locator_t &) @param val (C++: const int32 &) """ return _ida_hexrays.user_iflags_insert(*args) def user_iflags_erase(*args): """ user_iflags_erase(map, p) Erase current element from user_iflags_t. 
@param map (C++: user_iflags_t *) @param p (C++: user_iflags_iterator_t) """ return _ida_hexrays.user_iflags_erase(*args) def user_iflags_clear(*args): """ user_iflags_clear(map) Clear user_iflags_t. @param map (C++: user_iflags_t *) """ return _ida_hexrays.user_iflags_clear(*args) def user_iflags_size(*args): """ user_iflags_size(map) -> size_t Get size of user_iflags_t. @param map (C++: user_iflags_t *) """ return _ida_hexrays.user_iflags_size(*args) def user_iflags_free(*args): """ user_iflags_free(map) Delete user_iflags_t instance. @param map (C++: user_iflags_t *) """ return _ida_hexrays.user_iflags_free(*args) def user_iflags_new(*args): """ user_iflags_new() -> user_iflags_t Create a new user_iflags_t instance. """ return _ida_hexrays.user_iflags_new(*args) user_unions_iterator_t_swigregister = _ida_hexrays.user_unions_iterator_t_swigregister user_unions_iterator_t_swigregister(user_unions_iterator_t) def user_unions_begin(*args): """ user_unions_begin(map) -> user_unions_iterator_t Get iterator pointing to the beginning of user_unions_t. @param map (C++: const user_unions_t *) """ return _ida_hexrays.user_unions_begin(*args) def user_unions_end(*args): """ user_unions_end(map) -> user_unions_iterator_t Get iterator pointing to the end of user_unions_t. @param map (C++: const user_unions_t *) """ return _ida_hexrays.user_unions_end(*args) def user_unions_next(*args): """ user_unions_next(p) -> user_unions_iterator_t Move to the next element. @param p (C++: user_unions_iterator_t) """ return _ida_hexrays.user_unions_next(*args) def user_unions_prev(*args): """ user_unions_prev(p) -> user_unions_iterator_t Move to the previous element. @param p (C++: user_unions_iterator_t) """ return _ida_hexrays.user_unions_prev(*args) def user_unions_first(*args): """ user_unions_first(p) -> ea_t const & Get reference to the current map key. 
@param p (C++: user_unions_iterator_t) """ return _ida_hexrays.user_unions_first(*args) def user_unions_second(*args): """ user_unions_second(p) -> intvec_t Get reference to the current map value. @param p (C++: user_unions_iterator_t) """ return _ida_hexrays.user_unions_second(*args) def user_unions_find(*args): """ user_unions_find(map, key) -> user_unions_iterator_t Find the specified key in user_unions_t. @param map (C++: const user_unions_t *) @param key (C++: const ea_t &) """ return _ida_hexrays.user_unions_find(*args) def user_unions_insert(*args): """ user_unions_insert(map, key, val) -> user_unions_iterator_t Insert new (ea_t, intvec_t) pair into user_unions_t. @param map (C++: user_unions_t *) @param key (C++: const ea_t &) @param val (C++: const intvec_t &) """ return _ida_hexrays.user_unions_insert(*args) def user_unions_erase(*args): """ user_unions_erase(map, p) Erase current element from user_unions_t. @param map (C++: user_unions_t *) @param p (C++: user_unions_iterator_t) """ return _ida_hexrays.user_unions_erase(*args) def user_unions_clear(*args): """ user_unions_clear(map) Clear user_unions_t. @param map (C++: user_unions_t *) """ return _ida_hexrays.user_unions_clear(*args) def user_unions_size(*args): """ user_unions_size(map) -> size_t Get size of user_unions_t. @param map (C++: user_unions_t *) """ return _ida_hexrays.user_unions_size(*args) def user_unions_free(*args): """ user_unions_free(map) Delete user_unions_t instance. @param map (C++: user_unions_t *) """ return _ida_hexrays.user_unions_free(*args) def user_unions_new(*args): """ user_unions_new() -> user_unions_t Create a new user_unions_t instance. """ return _ida_hexrays.user_unions_new(*args) user_labels_iterator_t_swigregister = _ida_hexrays.user_labels_iterator_t_swigregister user_labels_iterator_t_swigregister(user_labels_iterator_t) def user_labels_begin(*args): """ user_labels_begin(map) -> user_labels_iterator_t Get iterator pointing to the beginning of user_labels_t. 
@param map (C++: const user_labels_t *) """ return _ida_hexrays.user_labels_begin(*args) def user_labels_end(*args): """ user_labels_end(map) -> user_labels_iterator_t Get iterator pointing to the end of user_labels_t. @param map (C++: const user_labels_t *) """ return _ida_hexrays.user_labels_end(*args) def user_labels_next(*args): """ user_labels_next(p) -> user_labels_iterator_t Move to the next element. @param p (C++: user_labels_iterator_t) """ return _ida_hexrays.user_labels_next(*args) def user_labels_prev(*args): """ user_labels_prev(p) -> user_labels_iterator_t Move to the previous element. @param p (C++: user_labels_iterator_t) """ return _ida_hexrays.user_labels_prev(*args) def user_labels_first(*args): """ user_labels_first(p) -> int const & Get reference to the current map key. @param p (C++: user_labels_iterator_t) """ return _ida_hexrays.user_labels_first(*args) def user_labels_second(*args): """ user_labels_second(p) -> qstring & Get reference to the current map value. @param p (C++: user_labels_iterator_t) """ return _ida_hexrays.user_labels_second(*args) def user_labels_find(*args): """ user_labels_find(map, key) -> user_labels_iterator_t Find the specified key in user_labels_t. @param map (C++: const user_labels_t *) @param key (C++: const int &) """ return _ida_hexrays.user_labels_find(*args) def user_labels_insert(*args): """ user_labels_insert(map, key, val) -> user_labels_iterator_t Insert new (int, qstring) pair into user_labels_t. @param map (C++: user_labels_t *) @param key (C++: const int &) @param val (C++: const qstring &) """ return _ida_hexrays.user_labels_insert(*args) def user_labels_erase(*args): """ user_labels_erase(map, p) Erase current element from user_labels_t. @param map (C++: user_labels_t *) @param p (C++: user_labels_iterator_t) """ return _ida_hexrays.user_labels_erase(*args) def user_labels_clear(*args): """ user_labels_clear(map) Clear user_labels_t. 
@param map (C++: user_labels_t *) """ return _ida_hexrays.user_labels_clear(*args) def user_labels_size(*args): """ user_labels_size(map) -> size_t Get size of user_labels_t. @param map (C++: user_labels_t *) """ return _ida_hexrays.user_labels_size(*args) def user_labels_free(*args): """ user_labels_free(map) Delete user_labels_t instance. @param map (C++: user_labels_t *) """ return _ida_hexrays.user_labels_free(*args) def user_labels_new(*args): """ user_labels_new() -> user_labels_t Create a new user_labels_t instance. """ return _ida_hexrays.user_labels_new(*args) eamap_iterator_t_swigregister = _ida_hexrays.eamap_iterator_t_swigregister eamap_iterator_t_swigregister(eamap_iterator_t) def eamap_begin(*args): """ eamap_begin(map) -> eamap_iterator_t Get iterator pointing to the beginning of eamap_t. @param map (C++: const eamap_t *) """ return _ida_hexrays.eamap_begin(*args) def eamap_end(*args): """ eamap_end(map) -> eamap_iterator_t Get iterator pointing to the end of eamap_t. @param map (C++: const eamap_t *) """ return _ida_hexrays.eamap_end(*args) def eamap_next(*args): """ eamap_next(p) -> eamap_iterator_t Move to the next element. @param p (C++: eamap_iterator_t) """ return _ida_hexrays.eamap_next(*args) def eamap_prev(*args): """ eamap_prev(p) -> eamap_iterator_t Move to the previous element. @param p (C++: eamap_iterator_t) """ return _ida_hexrays.eamap_prev(*args) def eamap_first(*args): """ eamap_first(p) -> ea_t const & Get reference to the current map key. @param p (C++: eamap_iterator_t) """ return _ida_hexrays.eamap_first(*args) def eamap_second(*args): """ eamap_second(p) -> cinsnptrvec_t Get reference to the current map value. @param p (C++: eamap_iterator_t) """ return _ida_hexrays.eamap_second(*args) def eamap_find(*args): """ eamap_find(map, key) -> eamap_iterator_t Find the specified key in eamap_t. 
@param map (C++: const eamap_t *) @param key (C++: const ea_t &) """ return _ida_hexrays.eamap_find(*args) def eamap_insert(*args): """ eamap_insert(map, key, val) -> eamap_iterator_t Insert new (ea_t, cinsnptrvec_t) pair into eamap_t. @param map (C++: eamap_t *) @param key (C++: const ea_t &) @param val (C++: const cinsnptrvec_t &) """ return _ida_hexrays.eamap_insert(*args) def eamap_erase(*args): """ eamap_erase(map, p) Erase current element from eamap_t. @param map (C++: eamap_t *) @param p (C++: eamap_iterator_t) """ return _ida_hexrays.eamap_erase(*args) def eamap_clear(*args): """ eamap_clear(map) Clear eamap_t. @param map (C++: eamap_t *) """ return _ida_hexrays.eamap_clear(*args) def eamap_size(*args): """ eamap_size(map) -> size_t Get size of eamap_t. @param map (C++: eamap_t *) """ return _ida_hexrays.eamap_size(*args) def eamap_free(*args): """ eamap_free(map) Delete eamap_t instance. @param map (C++: eamap_t *) """ return _ida_hexrays.eamap_free(*args) def eamap_new(*args): """ eamap_new() -> eamap_t Create a new eamap_t instance. """ return _ida_hexrays.eamap_new(*args) boundaries_iterator_t_swigregister = _ida_hexrays.boundaries_iterator_t_swigregister boundaries_iterator_t_swigregister(boundaries_iterator_t) def boundaries_begin(*args): """ boundaries_begin(map) -> boundaries_iterator_t Get iterator pointing to the beginning of boundaries_t. @param map (C++: const boundaries_t *) """ return _ida_hexrays.boundaries_begin(*args) def boundaries_end(*args): """ boundaries_end(map) -> boundaries_iterator_t Get iterator pointing to the end of boundaries_t. @param map (C++: const boundaries_t *) """ return _ida_hexrays.boundaries_end(*args) def boundaries_next(*args): """ boundaries_next(p) -> boundaries_iterator_t Move to the next element. @param p (C++: boundaries_iterator_t) """ return _ida_hexrays.boundaries_next(*args) def boundaries_prev(*args): """ boundaries_prev(p) -> boundaries_iterator_t Move to the previous element. 
@param p (C++: boundaries_iterator_t) """ return _ida_hexrays.boundaries_prev(*args) def boundaries_first(*args): """ boundaries_first(p) -> cinsn_t Get reference to the current map key. @param p (C++: boundaries_iterator_t) """ return _ida_hexrays.boundaries_first(*args) def boundaries_second(*args): """ boundaries_second(p) -> rangeset_t Get reference to the current map value. @param p (C++: boundaries_iterator_t) """ return _ida_hexrays.boundaries_second(*args) def boundaries_erase(*args): """ boundaries_erase(map, p) Erase current element from boundaries_t. @param map (C++: boundaries_t *) @param p (C++: boundaries_iterator_t) """ return _ida_hexrays.boundaries_erase(*args) def boundaries_clear(*args): """ boundaries_clear(map) Clear boundaries_t. @param map (C++: boundaries_t *) """ return _ida_hexrays.boundaries_clear(*args) def boundaries_size(*args): """ boundaries_size(map) -> size_t Get size of boundaries_t. @param map (C++: boundaries_t *) """ return _ida_hexrays.boundaries_size(*args) def boundaries_free(*args): """ boundaries_free(map) Delete boundaries_t instance. @param map (C++: boundaries_t *) """ return _ida_hexrays.boundaries_free(*args) def boundaries_new(*args): """ boundaries_new() -> boundaries_t Create a new boundaries_t instance. """ return _ida_hexrays.boundaries_new(*args) block_chains_iterator_t_swigregister = _ida_hexrays.block_chains_iterator_t_swigregister block_chains_iterator_t_swigregister(block_chains_iterator_t) def block_chains_begin(*args): """ block_chains_begin(set) -> block_chains_iterator_t Get iterator pointing to the beginning of 'block_chains_t' . @param set (C++: const block_chains_t *) """ return _ida_hexrays.block_chains_begin(*args) def block_chains_end(*args): """ block_chains_end(set) -> block_chains_iterator_t Get iterator pointing to the end of 'block_chains_t' . 
@param set (C++: const block_chains_t *) """ return _ida_hexrays.block_chains_end(*args) def block_chains_next(*args): """ block_chains_next(p) -> block_chains_iterator_t Move to the next element. @param p (C++: block_chains_iterator_t) """ return _ida_hexrays.block_chains_next(*args) def block_chains_prev(*args): """ block_chains_prev(p) -> block_chains_iterator_t Move to the previous element. @param p (C++: block_chains_iterator_t) """ return _ida_hexrays.block_chains_prev(*args) def block_chains_get(*args): """ block_chains_get(p) -> chain_t Get reference to the current set value. @param p (C++: block_chains_iterator_t) """ return _ida_hexrays.block_chains_get(*args) def block_chains_find(*args): """ block_chains_find(set, val) -> block_chains_iterator_t Find the specified key in set 'block_chains_t' . @param set (C++: const block_chains_t *) @param val (C++: const chain_t &) """ return _ida_hexrays.block_chains_find(*args) def block_chains_insert(*args): """ block_chains_insert(set, val) -> block_chains_iterator_t Insert new ( 'chain_t' ) into set 'block_chains_t' . @param set (C++: block_chains_t *) @param val (C++: const chain_t &) """ return _ida_hexrays.block_chains_insert(*args) def block_chains_erase(*args): """ block_chains_erase(set, p) Erase current element from 'block_chains_t' . @param set (C++: block_chains_t *) @param p (C++: block_chains_iterator_t) """ return _ida_hexrays.block_chains_erase(*args) def block_chains_clear(*args): """ block_chains_clear(set) Clear 'block_chains_t' . @param set (C++: block_chains_t *) """ return _ida_hexrays.block_chains_clear(*args) def block_chains_size(*args): """ block_chains_size(set) -> size_t Get size of 'block_chains_t' . @param set (C++: block_chains_t *) """ return _ida_hexrays.block_chains_size(*args) def block_chains_free(*args): """ block_chains_free(set) Delete 'block_chains_t' instance. 
@param set (C++: block_chains_t *) """ return _ida_hexrays.block_chains_free(*args) def block_chains_new(*args): """ block_chains_new() -> block_chains_t Create a new 'block_chains_t' instance. """ return _ida_hexrays.block_chains_new(*args) #<pycode(py_hexrays)> import ida_funcs hexrays_failure_t.__str__ = lambda self: str("%x: %s" % (self.errea, self.desc())) # --------------------------------------------------------------------- # Renamings is_allowed_on_small_struni = accepts_small_udts is_small_struni = is_small_udt # --------------------------------------------------------------------- # --------------------------------------------------------------------- def decompile(ea, hf=None, flags=0): if isinstance(ea, (int, long)): func = ida_funcs.get_func(ea) if not func: return elif type(ea) == ida_funcs.func_t: func = ea else: raise RuntimeError('arg 1 of decompile expects either ea_t or cfunc_t argument') if hf is None: hf = hexrays_failure_t() ptr = _ida_hexrays.decompile_func(func, hf, flags) if ptr.__deref__() is None: raise DecompilationFailure(hf) return ptr # --------------------------------------------------------------------- # stringify all string types #qtype.__str__ = qtype.c_str #qstring.__str__ = qstring.c_str #citem_cmt_t.__str__ = citem_cmt_t.c_str # --------------------------------------------------------------------- # listify all list types import ida_idaapi ida_idaapi._listify_types( cinsnptrvec_t, ctree_items_t, qvector_lvar_t, qvector_carg_t, qvector_ccase_t, hexwarns_t, history_t, lvar_saved_infos_t, ui_stroff_ops_t) def citem_to_specific_type(self): """ cast the citem_t object to its more specific type, either cexpr_t or cinsn_t. 
""" if self.op >= cot_empty and self.op <= cot_last: return self.cexpr elif self.op >= cit_empty and self.op < cit_end: return self.cinsn raise RuntimeError('unknown op type %s' % (repr(self.op), )) citem_t.to_specific_type = property(citem_to_specific_type) """ array used for translating cinsn_t->op type to their names. """ cinsn_t.op_to_typename = {} for k in dir(_ida_hexrays): if k.startswith('cit_'): cinsn_t.op_to_typename[getattr(_ida_hexrays, k)] = k[4:] """ array used for translating cexpr_t->op type to their names. """ cexpr_t.op_to_typename = {} for k in dir(_ida_hexrays): if k.startswith('cot_'): cexpr_t.op_to_typename[getattr(_ida_hexrays, k)] = k[4:] cinsn_t.opname = property(property_op_to_typename) cexpr_t.opname = property(property_op_to_typename) def cexpr_operands(self): """ return a dictionary with the operands of a cexpr_t. """ if self.op >= cot_comma and self.op <= cot_asgumod or \ self.op >= cot_lor and self.op <= cot_fdiv or \ self.op == cot_idx: return {'x': self.x, 'y': self.y} elif self.op == cot_tern: return {'x': self.x, 'y': self.y, 'z': self.z} elif self.op in [cot_fneg, cot_neg, cot_sizeof] or \ self.op >= cot_lnot and self.op <= cot_predec: return {'x': self.x} elif self.op == cot_cast: return {'type': self.type, 'x': self.x} elif self.op == cot_call: return {'x': self.x, 'a': self.a} elif self.op in [cot_memref, cot_memptr]: return {'x': self.x, 'm': self.m} elif self.op == cot_num: return {'n': self.n} elif self.op == cot_fnum: return {'fpc': self.fpc} elif self.op == cot_str: return {'string': self.string} elif self.op == cot_obj: return {'obj_ea': self.obj_ea} elif self.op == cot_var: return {'v': self.v} elif self.op == cot_helper: return {'helper': self.helper} raise RuntimeError('unknown op type %s' % self.opname) cexpr_t.operands = property(cexpr_operands) def cinsn_details(self): """ return the details pointer for the cinsn_t object depending on the value of its op member. \ this is one of the cblock_t, cif_t, etc. objects. 
""" if self.op not in self.op_to_typename: raise RuntimeError('unknown item->op type') opname = self.opname if opname == 'empty': return self if opname in ['break', 'continue']: return None return getattr(self, 'c' + opname) cinsn_t.details = property(cinsn_details) cblock_t.__iter__ = cblock_iter cblock_t.__len__ = cblock_t.size # cblock.find(cinsn_t) -> returns the iterator positioned at the given item cblock_t.find = cblock_find # cblock.index(cinsn_t) -> returns the index of the given item cblock_t.index = cblock_index # cblock.at(int) -> returns the item at the given index index cblock_t.at = cblock_at # cblock.remove(cinsn_t) cblock_t.remove = cblock_remove # cblock.insert(index, cinsn_t) cblock_t.insert = cblock_insert cfuncptr_t.__str__ = lambda self: str(self.__deref__()) import ida_typeinf def cfunc_type(self): """ Get the function's return type tinfo_t object. """ tif = ida_typeinf.tinfo_t() result = self.get_func_type(tif) if not result: return return tif cfunc_t.type = property(cfunc_type) cfuncptr_t.type = property(lambda self: self.__deref__().type) cfunc_t.arguments = property(lambda self: [o for o in self.lvars if o.is_arg_var]) cfuncptr_t.arguments = property(lambda self: self.__deref__().arguments) cfunc_t.lvars = property(cfunc_t.get_lvars) cfuncptr_t.lvars = property(lambda self: self.__deref__().lvars) cfunc_t.warnings = property(cfunc_t.get_warnings) cfuncptr_t.warnings = property(lambda self: self.__deref__().warnings) cfunc_t.pseudocode = property(cfunc_t.get_pseudocode) cfuncptr_t.pseudocode = property(lambda self: self.__deref__().get_pseudocode()) cfunc_t.eamap = property(cfunc_t.get_eamap) cfuncptr_t.eamap = property(lambda self: self.__deref__().get_eamap()) cfunc_t.boundaries = property(cfunc_t.get_boundaries) cfuncptr_t.boundaries = property(lambda self: self.__deref__().get_boundaries()) #pragma SWIG nowarn=+503 lvar_t.used = property(lvar_t.used) lvar_t.typed = property(lvar_t.typed) lvar_t.mreg_done = property(lvar_t.mreg_done) 
lvar_t.has_nice_name = property(lvar_t.has_nice_name) lvar_t.is_unknown_width = property(lvar_t.is_unknown_width) lvar_t.has_user_info = property(lvar_t.has_user_info) lvar_t.has_user_name = property(lvar_t.has_user_name) lvar_t.has_user_type = property(lvar_t.has_user_type) lvar_t.is_result_var = property(lvar_t.is_result_var) lvar_t.is_arg_var = property(lvar_t.is_arg_var) lvar_t.is_fake_var = property(lvar_t.is_fake_var) lvar_t.is_overlapped_var = property(lvar_t.is_overlapped_var) lvar_t.is_floating_var = property(lvar_t.is_floating_var) lvar_t.is_spoiled_var = property(lvar_t.is_spoiled_var) lvar_t.is_mapdst_var = property(lvar_t.is_mapdst_var) # dictify all dict-like types #_map_as_dict(user_labels_t, 'user_labels', (int, long), qstring) _map_as_dict(user_cmts_t, 'user_cmts', treeloc_t, citem_cmt_t) _map_as_dict(user_numforms_t, 'user_numforms', operand_locator_t, number_format_t) _map_as_dict(user_iflags_t, 'user_iflags', citem_locator_t, int) import ida_pro _map_as_dict(user_unions_t, 'user_unions', (int, long), ida_pro.intvec_t) _map_as_dict(eamap_t, 'eamap', long, cinsnptrvec_t) import ida_range _map_as_dict(boundaries_t, 'boundaries', cinsn_t, ida_range.rangeset_t) # # Object ownership # # ---------------- def install_hexrays_callback(callback): "Deprecated. Please use Hexrays_Hooks instead" h = __cbhooks_t(callback) h.hook() return True def remove_hexrays_callback(callback): "Deprecated. Please use Hexrays_Hooks instead" for inst in __cbhooks_t.instances: if inst.callback == callback: inst.unhook() __cbhooks_t.instances.remove(inst) return 1 return 0 #</pycode(py_hexrays)> if _BC695: get_tform_vdui=get_widget_vdui hx_get_tform_vdui=hx_get_widget_vdui HEXRAYS_API_MAGIC1=(HEXRAYS_API_MAGIC>>32) HEXRAYS_API_MAGIC2=(HEXRAYS_API_MAGIC&0xFFFFFFFF)
[ 2, 770, 2393, 373, 6338, 7560, 416, 12672, 3528, 357, 4023, 1378, 2503, 13, 2032, 328, 13, 2398, 737, 198, 2, 10628, 362, 13, 15, 13, 1065, 198, 2, 198, 2, 2141, 407, 787, 2458, 284, 428, 2393, 4556, 345, 760, 644, 345, 389, 180...
2.265584
67,090
#!/usr/bin/env python3 """ @author: Sam Cook MySql Parser for graphical presentation """ import mysql.connector import datetime from mysql.connector import Error from datetime import datetime, timedelta import json
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 37811, 198, 31, 9800, 25, 3409, 8261, 198, 3666, 50, 13976, 23042, 263, 329, 27831, 10470, 198, 37811, 198, 11748, 48761, 13, 8443, 273, 198, 11748, 4818, 8079, 198, 6738, 48761, 1...
3.557377
61
# test_fluxqubit.py # meant to be run with 'pytest' # # This file is part of scqubits. # # Copyright (c) 2019 and later, Jens Koch and Peter Groszkowski # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. ############################################################################ import numpy as np from scqubits import FluxQubit from scqubits.tests.conftest import StandardTests
[ 2, 1332, 62, 69, 22564, 421, 2545, 13, 9078, 198, 2, 4001, 284, 307, 1057, 351, 705, 9078, 9288, 6, 198, 2, 198, 2, 770, 2393, 318, 636, 286, 629, 421, 9895, 13, 198, 2, 198, 2, 220, 220, 220, 15069, 357, 66, 8, 13130, 290, ...
3.448276
145
import FWCore.ParameterSet.Config as cms patPFParticles = cms.EDProducer("PATPFParticleProducer", # General configurables pfCandidateSource = cms.InputTag("noJet"), # MC matching configurables addGenMatch = cms.bool(False), genParticleMatch = cms.InputTag(""), ## particles source to be used for the MC matching ## must be an InputTag or VInputTag to a product of ## type edm::Association<reco::GenParticleCollection> embedGenMatch = cms.bool(False), ## embed gen match inside the object instead of storing the ref # add user data userData = cms.PSet( # add custom classes here userClasses = cms.PSet( src = cms.VInputTag('') ), # add doubles here userFloats = cms.PSet( src = cms.VInputTag('') ), # add ints here userInts = cms.PSet( src = cms.VInputTag('') ), # add candidate ptrs here userCands = cms.PSet( src = cms.VInputTag('') ), # add "inline" functions here userFunctions = cms.vstring(), userFunctionLabels = cms.vstring() ), # Efficiencies addEfficiencies = cms.bool(False), efficiencies = cms.PSet(), # resolution addResolutions = cms.bool(False), resolutions = cms.PSet(), )
[ 11748, 48849, 14055, 13, 36301, 7248, 13, 16934, 355, 269, 907, 198, 198, 8071, 47, 5837, 26845, 796, 269, 907, 13, 1961, 11547, 2189, 7203, 47, 1404, 47, 5837, 20205, 11547, 2189, 1600, 198, 220, 220, 220, 1303, 3611, 4566, 333, 2977...
2.192369
629
# coding: utf8 from __future__ import unicode_literals import pytest import spacy import json from api.server import parse, doc2json, load_model def test_server_parse(model, text, doc): load_model(model) json_doc = parse(model, text) direct_json_doc = doc2json(doc, model) assert json.dumps(json_doc, sort_keys=True) == json.dumps( direct_json_doc, sort_keys=True ) def test_doc2json_doc_tokens(doc, model): data = doc2json(doc, model) assert data["model"] == model assert data["doc"]["text"] == doc.text assert data["doc"]["text_with_ws"] == doc.text_with_ws assert data["doc"]["is_tagged"] assert data["doc"]["is_parsed"] assert data["doc"]["is_sentenced"] assert len(data["tokens"]) == len(doc) assert data["tokens"][0]["text"] == doc[0].text assert data["tokens"][0]["head"] == doc[0].head.i def test_doc2json_doc_ents(doc, model): data = doc2json(doc, model) ents = list(doc.ents) assert "ents" in data assert len(data["ents"]) == len(ents) assert len(data["ents"]) >= 1 assert data["ents"][0]["start"] == ents[0].start assert data["ents"][0]["end"] == ents[0].end assert data["ents"][0]["label"] == ents[0].label_ def test_doc2json_doc_sents(doc, model): data = doc2json(doc, model) sents = list(doc.sents) assert "sents" in data assert len(data["sents"]) == len(sents) assert len(data["sents"]) >= 1 assert data["sents"][0]["start"] == sents[0].start assert data["sents"][0]["end"] == sents[0].end def test_doc2json_doc_noun_chunks(doc, model): data = doc2json(doc, model) chunks = list(doc.noun_chunks) assert "noun_chunks" in data assert len(data["noun_chunks"]) == len(chunks) assert len(data["noun_chunks"]) >= 1 assert data["noun_chunks"][0]["start"] == chunks[0].start assert data["noun_chunks"][0]["end"] == chunks[0].end
[ 2, 19617, 25, 3384, 69, 23, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 11748, 12972, 9288, 198, 11748, 599, 1590, 198, 11748, 33918, 198, 198, 6738, 40391, 13, 15388, 1330, 21136, 11, 2205, 17, 17752, 1...
2.382022
801
########################################################################## # # Copyright (c) 2018, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import IECore import IECoreScene import Gaffer import GafferScene import GafferArnold import GafferDispatch import GafferImage import imath import inspect IECore.registerRunTimeTyped( ArnoldTextureBake, typeName = "GafferArnold::ArnoldTextureBake" )
[ 29113, 29113, 7804, 2235, 198, 2, 198, 2, 220, 15069, 357, 66, 8, 2864, 11, 7412, 7117, 8495, 3457, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 220, 2297, 396, 3890, 290, 779, 287, 2723, 290, 13934, 5107, 11, 351, 393, 1231, 198, ...
3.371522
611
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2022 Valory AG # Copyright 2018-2021 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ """This module contains utility code for generator modules.""" import inspect import os import re import shutil import subprocess # nosec import sys import tempfile from pathlib import Path from typing import Tuple from aea.configurations.base import ProtocolSpecification from aea.configurations.constants import ( DEFAULT_PROTOCOL_CONFIG_FILE, PACKAGES, PROTOCOL_LANGUAGE_JS, PROTOCOL_LANGUAGE_PYTHON, ) from aea.configurations.loader import ConfigLoader from aea.helpers.io import open_file SPECIFICATION_PRIMITIVE_TYPES = ["pt:bytes", "pt:int", "pt:float", "pt:bool", "pt:str"] SPECIFICATION_COMPOSITIONAL_TYPES = [ "pt:set", "pt:list", "pt:dict", "pt:union", "pt:optional", ] PYTHON_COMPOSITIONAL_TYPES = [ "FrozenSet", "Tuple", "Dict", "Union", "Optional", ] MESSAGE_IMPORT = "from aea.protocols.base import Message" SERIALIZER_IMPORT = "from aea.protocols.base import Serializer" PATH_TO_PACKAGES = PACKAGES INIT_FILE_NAME = "__init__.py" PROTOCOL_YAML_FILE_NAME = DEFAULT_PROTOCOL_CONFIG_FILE MESSAGE_DOT_PY_FILE_NAME = "message.py" DIALOGUE_DOT_PY_FILE_NAME = "dialogues.py" CUSTOM_TYPES_DOT_PY_FILE_NAME = "custom_types.py" SERIALIZATION_DOT_PY_FILE_NAME = "serialization.py" 
PYTHON_TYPE_TO_PROTO_TYPE = { "bytes": "bytes", "int": "int32", "float": "float", "bool": "bool", "str": "string", } CURRENT_DIR = os.path.dirname(inspect.getfile(inspect.currentframe())) # type: ignore ISORT_CONFIGURATION_FILE = os.path.join(CURRENT_DIR, "isort.cfg") ISORT_CLI_ARGS = [ "--settings-path", ISORT_CONFIGURATION_FILE, "--quiet", ] PROTOLINT_CONFIGURATION_FILE_NAME = "protolint.yaml" PROTOLINT_CONFIGURATION = """lint: rules: remove: - MESSAGE_NAMES_UPPER_CAMEL_CASE - ENUM_FIELD_NAMES_ZERO_VALUE_END_WITH - PACKAGE_NAME_LOWER_CASE - REPEATED_FIELD_NAMES_PLURALIZED - FIELD_NAMES_LOWER_SNAKE_CASE""" PROTOLINT_INDENTATION_ERROR_STR = "incorrect indentation style" PROTOLINT_ERROR_WHITELIST = [PROTOLINT_INDENTATION_ERROR_STR] def _to_camel_case(text: str) -> str: """ Convert a text in snake_case format into the CamelCase format. :param text: the text to be converted. :return: The text in CamelCase format. """ return "".join(word.title() for word in text.split("_")) def _camel_case_to_snake_case(text: str) -> str: """ Convert a text in CamelCase format into the snake_case format. :param text: the text to be converted. :return: The text in CamelCase format. """ return re.sub(r"(?<!^)(?=[A-Z])", "_", text).lower() def _match_brackets(text: str, index_of_open_bracket: int) -> int: """ Give the index of the matching close bracket for the opening bracket at 'index_of_open_bracket' in the input 'text'. :param text: the text containing the brackets. :param index_of_open_bracket: the index of the opening bracket. :return: the index of the matching closing bracket (if any). :raises SyntaxError if there are no matching closing bracket. """ if text[index_of_open_bracket] != "[": raise SyntaxError( "Index {} in 'text' is not an open bracket '['. 
It is {}".format( index_of_open_bracket, text[index_of_open_bracket], ) ) open_bracket_stack = [] for index in range(index_of_open_bracket, len(text)): if text[index] == "[": open_bracket_stack.append(text[index]) elif text[index] == "]": open_bracket_stack.pop() if not open_bracket_stack: return index raise SyntaxError( "No matching closing bracket ']' for the opening bracket '[' at {} " + str(index_of_open_bracket) ) def _has_matched_brackets(text: str) -> bool: """ Evaluate whether every opening bracket '[' in the 'text' has a matching closing bracket ']'. :param text: the text. :return: Boolean result, and associated message. """ open_bracket_stack = [] for index, _ in enumerate(text): if text[index] == "[": open_bracket_stack.append(index) elif text[index] == "]": if len(open_bracket_stack) == 0: return False open_bracket_stack.pop() return len(open_bracket_stack) == 0 def _get_sub_types_of_compositional_types(compositional_type: str) -> Tuple[str, ...]: """ Extract the sub-types of compositional types. This method handles both specification types (e.g. pt:set[], pt:dict[]) as well as python types (e.g. FrozenSet[], Union[]). :param compositional_type: the compositional type string whose sub-types are to be extracted. :return: tuple containing all extracted sub-types. """ sub_types_list = list() for valid_compositional_type in ( SPECIFICATION_COMPOSITIONAL_TYPES + PYTHON_COMPOSITIONAL_TYPES ): if compositional_type.startswith(valid_compositional_type): inside_string = compositional_type[ compositional_type.index("[") + 1 : compositional_type.rindex("]") ].strip() while inside_string != "": do_not_add = False if inside_string.find(",") == -1: # No comma; this is the last sub-type provisional_sub_type = inside_string.strip() if ( provisional_sub_type == "..." ): # The sub-string is ... used for Tuple, e.g. Tuple[int, ...] 
do_not_add = True else: sub_type = provisional_sub_type inside_string = "" else: # There is a comma; this MAY not be the last sub-type sub_string_until_comma = inside_string[ : inside_string.index(",") ].strip() if ( sub_string_until_comma.find("[") == -1 ): # No open brackets; this is a primitive type and NOT the last sub-type sub_type = sub_string_until_comma inside_string = inside_string[ inside_string.index(",") + 1 : ].strip() else: # There is an open bracket'['; this is a compositional type try: closing_bracket_index = _match_brackets( inside_string, inside_string.index("[") ) except SyntaxError: raise SyntaxError( "Bad formatting. No matching close bracket ']' for the open bracket at {}".format( inside_string[ : inside_string.index("[") + 1 ].strip() ) ) sub_type = inside_string[: closing_bracket_index + 1].strip() the_rest_of_inside_string = inside_string[ closing_bracket_index + 1 : ].strip() if ( the_rest_of_inside_string.find(",") == -1 ): # No comma; this is the last sub-type inside_string = the_rest_of_inside_string.strip() else: # There is a comma; this is not the last sub-type inside_string = the_rest_of_inside_string[ the_rest_of_inside_string.index(",") + 1 : ].strip() if not do_not_add: sub_types_list.append(sub_type) return tuple(sub_types_list) raise SyntaxError( "{} is not a valid compositional type.".format(compositional_type) ) def _union_sub_type_to_protobuf_variable_name( content_name: str, content_type: str ) -> str: """ Given a content of type union, create a variable name for its sub-type for protobuf. 
:param content_name: the name of the content :param content_type: the sub-type of a union type :return: The variable name """ if content_type.startswith("FrozenSet"): sub_type = _get_sub_types_of_compositional_types(content_type)[0] expanded_type_str = "set_of_{}".format(sub_type) elif content_type.startswith("Tuple"): sub_type = _get_sub_types_of_compositional_types(content_type)[0] expanded_type_str = "list_of_{}".format(sub_type) elif content_type.startswith("Dict"): sub_type_1 = _get_sub_types_of_compositional_types(content_type)[0] sub_type_2 = _get_sub_types_of_compositional_types(content_type)[1] expanded_type_str = "dict_of_{}_{}".format(sub_type_1, sub_type_2) else: expanded_type_str = content_type protobuf_variable_name = "{}_type_{}".format(content_name, expanded_type_str) return protobuf_variable_name def _python_pt_or_ct_type_to_proto_type(content_type: str) -> str: """ Convert a PT or CT from python to their protobuf equivalent. :param content_type: the python type :return: The protobuf equivalent """ if content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys(): proto_type = PYTHON_TYPE_TO_PROTO_TYPE[content_type] else: proto_type = content_type return proto_type def _includes_custom_type(content_type: str) -> bool: """ Evaluate whether a content type is a custom type or has a custom type as a sub-type. 
:param content_type: the content type :return: Boolean result """ if content_type.startswith("Optional"): sub_type = _get_sub_types_of_compositional_types(content_type)[0] result = _includes_custom_type(sub_type) elif content_type.startswith("Union"): sub_types = _get_sub_types_of_compositional_types(content_type) result = False for sub_type in sub_types: if _includes_custom_type(sub_type): result = True break elif ( content_type.startswith("FrozenSet") or content_type.startswith("Tuple") or content_type.startswith("Dict") or content_type in PYTHON_TYPE_TO_PROTO_TYPE.keys() ): result = False else: result = True return result def is_installed(programme: str) -> bool: """ Check whether a programme is installed on the system. :param programme: the name of the programme. :return: True if installed, False otherwise """ res = shutil.which(programme) return res is not None def base_protolint_command() -> str: """ Return the base protolint command. :return: The base protolint command """ if sys.platform.startswith("win"): protolint_base_cmd = "protolint" # pragma: nocover else: protolint_base_cmd = "PATH=${PATH}:${GOPATH}/bin/:~/go/bin protolint" return protolint_base_cmd def check_prerequisites() -> None: """Check whether a programme is installed on the system.""" # check black code formatter is installed if not is_installed("black"): raise FileNotFoundError( "Cannot find black code formatter! To install, please follow this link: https://black.readthedocs.io/en/stable/installation_and_usage.html" ) # check isort code formatter is installed if not is_installed("isort"): raise FileNotFoundError( "Cannot find isort code formatter! To install, please follow this link: https://pycqa.github.io/isort/#installing-isort" ) # check protolint code formatter is installed if subprocess.call(f"{base_protolint_command()} version", shell=True) != 0: # nosec raise FileNotFoundError( "Cannot find protolint protocol buffer schema file linter! 
To install, please follow this link: https://github.com/yoheimuta/protolint." ) # check protocol buffer compiler is installed if not is_installed("protoc"): raise FileNotFoundError( "Cannot find protocol buffer compiler! To install, please follow this link: https://developers.google.com/protocol-buffers/" ) def get_protoc_version() -> str: """Get the protoc version used.""" result = subprocess.run( # nosec ["protoc", "--version"], stdout=subprocess.PIPE, check=True ) result_str = result.stdout.decode("utf-8").strip("\n").strip("\r") return result_str def load_protocol_specification(specification_path: str) -> ProtocolSpecification: """ Load a protocol specification. :param specification_path: path to the protocol specification yaml file. :return: A ProtocolSpecification object """ config_loader = ConfigLoader( "protocol-specification_schema.json", ProtocolSpecification ) protocol_spec = config_loader.load_protocol_specification( open_file(specification_path) ) return protocol_spec def _create_protocol_file( path_to_protocol_package: str, file_name: str, file_content: str ) -> None: """ Create a file in the generated protocol package. :param path_to_protocol_package: path to the file :param file_name: the name of the file :param file_content: the content of the file """ pathname = os.path.join(path_to_protocol_package, file_name) with open_file(pathname, "w") as file: file.write(file_content) def try_run_black_formatting(path_to_protocol_package: str) -> None: """ Run Black code formatting via subprocess. :param path_to_protocol_package: a path where formatting should be applied. """ subprocess.run( # nosec [sys.executable, "-m", "black", path_to_protocol_package, "--quiet"], check=True, ) def try_run_isort_formatting(path_to_protocol_package: str) -> None: """ Run Isort code formatting via subprocess. :param path_to_protocol_package: a path where formatting should be applied. 
""" subprocess.run( # nosec [sys.executable, "-m", "isort", *ISORT_CLI_ARGS, path_to_protocol_package], check=True, ) def try_run_protoc( path_to_generated_protocol_package: str, name: str, language: str = PROTOCOL_LANGUAGE_PYTHON, ) -> None: """ Run 'protoc' protocol buffer compiler via subprocess. :param path_to_generated_protocol_package: path to the protocol buffer schema file. :param name: name of the protocol buffer schema file. :param language: the target language in which to compile the protobuf schema file """ # for closure-styled imports for JS, comment the first line and uncomment the second js_commonjs_import_option = ( "import_style=commonjs,binary:" if language == PROTOCOL_LANGUAGE_JS else "" ) language_part_of_the_command = f"--{language}_out={js_commonjs_import_option}{path_to_generated_protocol_package}" subprocess.run( # nosec [ "protoc", f"-I={path_to_generated_protocol_package}", language_part_of_the_command, f"{path_to_generated_protocol_package}/{name}.proto", ], stderr=subprocess.PIPE, encoding="utf-8", check=True, env=os.environ.copy(), ) def try_run_protolint(path_to_generated_protocol_package: str, name: str) -> None: """ Run 'protolint' linter via subprocess. :param path_to_generated_protocol_package: path to the protocol buffer schema file. :param name: name of the protocol buffer schema file. 
""" # path to proto file path_to_proto_file = os.path.join( path_to_generated_protocol_package, f"{name}.proto", ) # Dump protolint configuration into a temporary file temp_dir = tempfile.mkdtemp() path_to_configuration_in_tmp_file = Path( temp_dir, PROTOLINT_CONFIGURATION_FILE_NAME ) with open_file(path_to_configuration_in_tmp_file, "w") as file: file.write(PROTOLINT_CONFIGURATION) # Protolint command cmd = f'{base_protolint_command()} lint -config_path={path_to_configuration_in_tmp_file} -fix "{path_to_proto_file}"' # Execute protolint command subprocess.run( # nosec cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, encoding="utf-8", check=True, env=os.environ.copy(), shell=True, ) # Delete temporary configuration file shutil.rmtree(temp_dir) # pragma: no cover def check_protobuf_using_protoc( path_to_generated_protocol_package: str, name: str ) -> Tuple[bool, str]: """ Check whether a protocol buffer schema file is valid. Validation is via trying to compile the schema file. If successfully compiled it is valid, otherwise invalid. If valid, return True and a 'protobuf file is valid' message, otherwise return False and the error thrown by the compiler. :param path_to_generated_protocol_package: path to the protocol buffer schema file. :param name: name of the protocol buffer schema file. :return: Boolean result and an accompanying message """ try: try_run_protoc(path_to_generated_protocol_package, name) os.remove(os.path.join(path_to_generated_protocol_package, name + "_pb2.py")) return True, "protobuf file is valid" except subprocess.CalledProcessError as e: pattern = name + ".proto:[0-9]+:[0-9]+: " error_message = re.sub(pattern, "", e.stderr[:-1]) return False, error_message def compile_protobuf_using_protoc( path_to_generated_protocol_package: str, name: str, language: str ) -> Tuple[bool, str]: """ Compile a protocol buffer schema file using protoc. 
If successfully compiled, return True and a success message, otherwise return False and the error thrown by the compiler. :param path_to_generated_protocol_package: path to the protocol buffer schema file. :param name: name of the protocol buffer schema file. :param language: the target language in which to compile the protobuf schema file :return: Boolean result and an accompanying message """ try: try_run_protoc(path_to_generated_protocol_package, name, language) return True, "protobuf schema successfully compiled" except subprocess.CalledProcessError as e: pattern = name + ".proto:[0-9]+:[0-9]+: " error_message = re.sub(pattern, "", e.stderr[:-1]) return False, error_message def apply_protolint(path_to_proto_file: str, name: str) -> Tuple[bool, str]: """ Apply protolint linter to a protocol buffer schema file. If no output, return True and a success message, otherwise return False and the output shown by the linter (minus the indentation suggestions which are automatically fixed by protolint). :param path_to_proto_file: path to the protocol buffer schema file. :param name: name of the protocol buffer schema file. :return: Boolean result and an accompanying message """ try: try_run_protolint(path_to_proto_file, name) return True, "protolint has no output" except subprocess.CalledProcessError as e: lines_to_show = [] for line in e.stderr.split("\n"): to_show = True for whitelist_error_str in PROTOLINT_ERROR_WHITELIST: if whitelist_error_str in line: to_show = False break if to_show: lines_to_show.append(line) error_message = "\n".join(lines_to_show) return False, error_message
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 16529, 26171, 198, 2, 198, 2, 220, 220, 15069, 33160, 3254, 652, 13077, 198, 2, 220, 220, 15069, 2864, 12, 1238, 2481, 376, 7569, 13, 20185, 15302, 198, 2, 198, 2...
2.380171
8,654
# -*- coding: utf-8 -*- # FOGLAMP_BEGIN # See: http://foglamp.readthedocs.io/ # FOGLAMP_END import os import asyncio import json from unittest.mock import MagicMock, patch from collections import Counter from aiohttp import web import pytest from foglamp.services.core import routes from foglamp.services.core import connect from foglamp.plugins.storage.common.backup import Backup from foglamp.plugins.storage.common.restore import Restore from foglamp.plugins.storage.common import exceptions from foglamp.services.core.api import backup_restore from foglamp.common.storage_client.storage_client import StorageClientAsync __author__ = "Vaibhav Singhal" __copyright__ = "Copyright (c) 2017 OSIsoft, LLC" __license__ = "Apache 2.0" __version__ = "${VERSION}"
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 376, 7730, 43, 23518, 62, 33, 43312, 198, 2, 4091, 25, 2638, 1378, 69, 28678, 696, 13, 961, 83, 704, 420, 82, 13, 952, 14, 198, 2, 376, 7730, 43, 23518, ...
3.072
250
""" Why need dimensional reduction The following is the use of dimensionality reduction in the data set: As data dimensions continue to decrease, the space required for data storage will also decrease. Low-dimensional data helps reduce calculation/training time. Some algorithms tend to perform poorly on high-dimensional data, and dimensionality reduction can improve algorithm availability. Dimensionality reduction can solve the problem of multicollinearity by removing redundant features. For example, we have two variables: "On the treadmill for a period of time Time spent and calorie consumption. These two variables are highly correlated. The longer the time spent on the treadmill, the more calories burned. Naturally, the more. Therefore, it does not make much sense to store these two data at the same time, just one is enough. Dimensionality reduction helps data visualization. As mentioned earlier, if the dimensionality of the data is very high, the visualization will become quite difficult, while drawing two-dimensional three-dimensional The graph of dimensional data is very simple. Common dimensional reduction techniques: 1. missing value ratio 2. low variance filter 3. high correlation filter 4. random forest 5. backward feature elimination 6. forward feature selection 7. factor analysis 8. principle components analysis 9. independent component analysis 10. IOSMAP 11. t-SNE 12. UMAP """ random_state = 0 from enum import Enum
[ 37811, 198, 5195, 761, 38517, 7741, 198, 198, 464, 1708, 318, 262, 779, 286, 15793, 1483, 7741, 287, 262, 1366, 900, 25, 198, 1081, 1366, 15225, 2555, 284, 10070, 11, 262, 2272, 2672, 329, 1366, 6143, 481, 635, 10070, 13, 198, 7754, ...
4.705128
312
""" These methods can be called inside WebCAT to determine which tests are loaded for a given section/exam pair. This allows a common WebCAT submission site to support different project tests """
[ 37811, 198, 4711, 5050, 460, 307, 1444, 2641, 5313, 34, 1404, 284, 5004, 543, 5254, 389, 9639, 198, 1640, 257, 1813, 2665, 14, 1069, 321, 5166, 13, 220, 770, 3578, 257, 2219, 5313, 34, 1404, 14498, 2524, 284, 198, 11284, 1180, 1628, ...
4.234043
47
from datetime import datetime import scrapy import lxml from lxml.html.clean import Cleaner import re SOURCE = 'Pgina 12' LANGUAGE = 'es' cleaner = Cleaner(allow_tags=['p', 'br', 'b', 'a', 'strong', 'i', 'em'])
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 11748, 15881, 88, 198, 11748, 300, 19875, 198, 6738, 300, 19875, 13, 6494, 13, 27773, 1330, 5985, 263, 198, 11748, 302, 628, 198, 47690, 796, 705, 31743, 1437, 1105, 6, 198, 43, 15567, 52, 1187...
2.6625
80
from calendar import c from typing import Dict, List, Union from zlib import DEF_BUF_SIZE import json_lines import numpy as np import re from sklearn.preprocessing import MultiLabelBinarizer from sklearn.manifold import TSNE from sklearn.preprocessing import StandardScaler import pandas as pd import json from scipy.sparse.linalg import svds from scipy.spatial import distance import os import streamlit as st if __name__ == "__main__": file_path = os.path.dirname(__file__) if file_path != "": os.chdir(file_path) products: List[Dict[str, Union[str, List[str]]]] = [] # input data into List with open("../cbscraper/product_urls_with_reviews.jsonlines", "rb") as f: unique = set() lines = f.read().splitlines() df_inter = pd.DataFrame(lines) df_inter.columns = ["json_element"] df_inter["json_element"].apply(json.loads) df = pd.json_normalize(df_inter["json_element"].apply(json.loads)) # to save myself if i do something dumb and run the scraper without deleting the .jsonlines file df.drop_duplicates(subset=["url"], inplace=True) # option: category of product, eg cleanser categories = set(df.category.values) # filter data by given option print("Hello world!") print("Welcome!") print(categories) print("pls enter the category:") cat = str(input()) display_product_names = df[df.category == cat] print(display_product_names[["brand", "product_name"]]) print("pls enter your top 3 products indices, separated by a new line") item1 = int(input()) item2 = int(input()) item3 = int(input()) print("pls enter # of recs:") num_recs = int(input()) reviews = display_product_names.explode("review_data") reviews["username"] = reviews["review_data"].apply(lambda x: x["UserNickname"]) grouped_reviews = reviews.groupby("username")["review_data"].apply(list) multiple_rating_users = set(grouped_reviews[grouped_reviews.map(len) > 1].index) print(multiple_rating_users) print("pls enter sephora userid, if you don't have one just enter 'none':") username = str(input()) if username == "none": print("your 
ingredients based recommendations are:") cbf = content_recommender( cat, df.product_name.values[item1], df.product_name.values[item2], df.product_name.values[item3], num_recs, df, ) print(cbf[["brand", "product_name", "url", "avg_rating"]]) else: cbf = content_recommender( cat, df.product_name.values[item1], df.product_name.values[item2], df.product_name.values[item3], num_recs + 10, df, ) cf = collab_recommender(cbf, num_recs, username) print("your hybrid recommendations are:") print(cf[["brand", "product_name", "url", "pred_rating"]]) print("thank u for using this service :)")
[ 6738, 11845, 1330, 269, 198, 6738, 19720, 1330, 360, 713, 11, 7343, 11, 4479, 198, 6738, 1976, 8019, 1330, 23449, 62, 19499, 37, 62, 33489, 198, 11748, 33918, 62, 6615, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 302, 198, 6738, 1...
2.471605
1,215
#!/usr/bin/env python # # Copyright (c) 2018, Pycom Limited. # # This software is licensed under the GNU GPL version 3 or any # later version, with permitted additional terms. For more information # see the Pycom Licence v1.0 document supplied with this file, or # available at https://www.pycom.io/opensource/licensing # """ Flash the ESP32 (bootloader, partitions table and factory app). How to call esptool: python esptool.py '--chip', 'esp32', '--port', /dev/ttyUSB0, '--baud', '921600', 'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', bootloader.bin, '0x8000', partitions.bin, '0x10000', application.bin, '0x3FF000', 'config_no_wifi.bin' """ from esptool import ESP32ROM import os import sys import struct import sqlite3 import argparse import subprocess import threading import time import fw_version import csv working_threads = {} macs_db = None wmacs = {} DB_MAC_UNUSED = 0 DB_MAC_ERROR = -1 DB_MAC_LOCK = -2 DB_MAC_OK = 1 if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 198, 2, 15069, 357, 66, 8, 2864, 11, 9485, 785, 15302, 13, 198, 2, 198, 2, 770, 3788, 318, 11971, 739, 262, 22961, 38644, 2196, 513, 393, 597, 198, 2, 1568, 2196, 11, 351, 104...
2.825137
366
import datetime from django.utils import timezone from django.contrib.auth.models import User from hknweb.events.models import Event, EventType, Rsvp
[ 11748, 4818, 8079, 198, 198, 6738, 42625, 14208, 13, 26791, 1330, 640, 11340, 198, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 27530, 1330, 11787, 198, 6738, 289, 15418, 12384, 13, 31534, 13, 27530, 1330, 8558, 11, 8558, 6030...
3.4
45
from django.shortcuts import redirect from .forms import PrescriptionForm from core.views import is_doctor, is_nurse, is_admin, is_patient from core.models import * from .models import Prescription from django.contrib.auth.decorators import login_required, user_passes_test from django.utils import timezone from django.shortcuts import render from django.core.urlresolvers import reverse def not_admin(user): """ :param user: The User in question :return: True if the user is anything but an Admin """ return not is_admin(user) def is_doctor_or_nurse(user): """ :param user: The User in question :return: True if the user is a Doctor or Nurse """ return is_doctor(user) or is_nurse(user) def get_prescription_list_for(cpatient): """ Generic getter for a specific patient's prescription list :param cpatient: Patient to fetch list for :return: context of Prescription list """ Prescriptions = Prescription.objects.all().filter(patient=cpatient) per = [] for p in Prescriptions.iterator(): per.append(str(dict(p.TIME_CHOICES)[p.Time_units])) p_list = zip(Prescriptions, per) return {"Labels": ["Doctor", "Drug", "Dosage", "Rate"], "Name": str(cpatient), "Prescriptions": p_list}
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 18941, 198, 198, 6738, 764, 23914, 1330, 1763, 6820, 8479, 198, 198, 6738, 4755, 13, 33571, 1330, 318, 62, 35580, 11, 318, 62, 77, 12321, 11, 318, 62, 28482, 11, 318, 62, 26029, 198, 198, ...
2.95612
433
""" 1. Clarification 2. Possible solutions - dfs + memoization - Topological sort 3. Coding 4. Tests """ # T=O(m*n), S=O(m*n) from functools import lru_cache # T=O(m*n), S=O(m*n)
[ 37811, 198, 16, 13, 15420, 2649, 198, 17, 13, 33671, 8136, 198, 220, 220, 220, 532, 288, 9501, 1343, 16155, 1634, 198, 220, 220, 220, 532, 5849, 2770, 3297, 198, 18, 13, 327, 7656, 198, 19, 13, 30307, 198, 37811, 628, 198, 2, 309,...
2.146067
89
#!/usr/bin/env python #coding=utf-8 ''' Remove tailing whitespaces and ensures one and only one empty ending line. ''' import os, re main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 66, 7656, 28, 40477, 12, 23, 198, 198, 7061, 6, 198, 27914, 7894, 278, 13216, 43076, 290, 19047, 530, 290, 691, 530, 6565, 7464, 1627, 13, 198, 7061, 6, 198, 198, 11748, 28686, ...
2.843137
51
from core import * from cameras import * from geometry import * from material import * from lights import * # instantiate and run the program TestPostprocessing2().run()
[ 6738, 4755, 1330, 1635, 198, 6738, 9073, 1330, 1635, 198, 6738, 22939, 1330, 1635, 198, 6738, 2587, 1330, 1635, 198, 6738, 7588, 1330, 1635, 628, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 2...
3.112903
62
#!/usr/bin/env python if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 628, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 1388, 3419, 198 ]
2.346154
26
import os import shutil import codecs import json from cuddlefish.runner import run_app from cuddlefish.rdf import RDFManifest
[ 11748, 28686, 198, 11748, 4423, 346, 198, 11748, 40481, 82, 198, 11748, 33918, 198, 198, 6738, 269, 24500, 11084, 13, 16737, 1330, 1057, 62, 1324, 198, 6738, 269, 24500, 11084, 13, 4372, 69, 1330, 371, 8068, 5124, 8409, 198 ]
3.282051
39
# PyRipple # # Copyright 2015 Gilles Pirio # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. moduleauthor:: Gilles Pirio <gilles.xrp@gmail.com> """ import numpy as np import pandas as pd import mpmath as mp from mpmath import mpf import matplotlib import matplotlib.pyplot as plt import json
[ 2, 9485, 49, 18793, 198, 2, 198, 2, 15069, 1853, 12981, 274, 10334, 952, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846,...
3.454545
231
# -*- coding: utf-8 -*- """ Unit tests for Senna """ from __future__ import unicode_literals from os import environ, path, sep import logging import unittest from nltk.classify import Senna from nltk.tag import SennaTagger, SennaChunkTagger, SennaNERTagger # Set Senna executable path for tests if it is not specified as an environment variable if 'SENNA' in environ: SENNA_EXECUTABLE_PATH = path.normpath(environ['SENNA']) + sep else: SENNA_EXECUTABLE_PATH = '/usr/share/senna-v3.0' senna_is_installed = path.exists(SENNA_EXECUTABLE_PATH)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 26453, 5254, 329, 311, 13713, 198, 37811, 198, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 6738, 28686, 1330, 551, 2268, 11, 3108, ...
2.684466
206
#------------------------------------------------------------------------------- # # Project: EOxServer <http://eoxserver.org> # Authors: Fabian Schindler <fabian.schindler@eox.at> # #------------------------------------------------------------------------------- # Copyright (C) 2015 EOX IT Services GmbH # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies of this Software or works derived from this Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
#------------------------------------------------------------------------------- from itertools import chain from lxml.builder import ElementMaker try: from django.core.urlresolvers import reverse except ImportError: from django.urls import reverse from django.shortcuts import get_object_or_404 from eoxserver.core.config import get_eoxserver_config from eoxserver.core.util.xmltools import ( XMLEncoder, NameSpace, NameSpaceMap ) from eoxserver.resources.coverages import models from eoxserver.services.opensearch.formats import get_formats from eoxserver.services.opensearch.extensions import get_extensions from eoxserver.services.opensearch.config import OpenSearchConfigReader
[ 2, 10097, 24305, 198, 2, 198, 2, 4935, 25, 412, 38208, 10697, 1279, 4023, 1378, 68, 1140, 15388, 13, 2398, 29, 198, 2, 46665, 25, 14236, 666, 3059, 521, 1754, 1279, 36434, 666, 13, 20601, 521, 1754, 31, 68, 1140, 13, 265, 29, 198,...
3.896811
533
text = ''' Victor Hugo's ({}) tale of injustice, heroism and love follows the fortunes of Jean Valjean, an escaped convict determined to put his criminal past behind him. But his attempts to become a respected member of the community are constantly put under threat: by his own conscience, when, owing to a case of mistaken identity, another man is arrested in his place; and by the relentless investigations of the dogged Inspector Javert. It is not simply for himself that Valjean must stay free, however, for he has sworn to protect the baby daughter of Fantine, driven to prostitution by poverty. Norman Denny's ({}) lively English translation is accompanied by an introduction discussing Hugo's political and artistic aims in writing Les Miserables. Victor Hugo (1802-85) wrote volumes of criticism, dramas, satirical verse and political journalism but is best remembered for his novels, especially Notre-Dame de Paris (also known as The Hunchback of Notre-Dame) and Les Miserables, which was adapted into one of the most successful musicals of all time. 'All human life is here' Cameron Mackintosh, producer of the musical Les Miserables 'One of the half-dozen greatest novels of the world' Upton Sinclair 'A great writer - inventive, witty, sly, innovatory' A. S. 
Byatt, author of Possession ''' name = 'Victor' word1 = 'writer' word2 = 'witty' numbers = "0123456789" small_letters = 'abcdefghijklmnopqrstuvwxyz' big_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' name_index = text.find(name) name_plus3 = text[name_index: name_index+len(name)+3] word1_index = text.find(word1, 0, 100) word2_index = text.find(word2, int(len(text)/2), len(text)) count_characters = text.count('of') is_text_starts_with_name = text.startswith(name) is_text_ends_with_name = text.endswith(name) text = text.format('1822-95', '1807-63') words = text.split(' ') text1 = ''.join(words) text2 = ','.join(words) text3 = '_'.join(words) text4 = ' '.join(words) text5 = text.replace('of', '@') text6 = text.capitalize() text7 = text.replace('a', '') text8 = text.strip() upper_name = name.upper() lower_name = name.lower() is_name_upper = name.isupper() is_name_lower = name.islower() is_big_letters_upper = big_letters.isupper() is_small_letters_lower = small_letters.islower() stringed_integer = '90'.isnumeric() stringed_float = '90.5'.isnumeric() converted_int = int('90') converted_float = float('90.5') converted_string = str(183) is_digit = converted_string[1].isdigit() edges = small_letters[0] + big_letters[-1] body = numbers[1:-1] evens = numbers[::2] odds = numbers[1::2] print('name', name) print('word1', word1) print('word2', word2) print('numbers', numbers) print('small_letters', small_letters) print('big_letters', big_letters) print('name_index', name_index) print('name_plus3', name_plus3) print('word1_index', word1_index) print('word2_index', word2_index) print('count_characters -> \'of\' in the text', count_characters) print('is_text_starts_with_name', is_text_starts_with_name) print('is_text_ends_with_name', is_text_ends_with_name) print('\n\n\n\n\n', 'text', text, '\n\n\n\n\n') print('\n\n\n\n\n', 'words', words, '\n\n\n\n\n') print('\n\n\n\n\n', 'text1', text1, '\n\n\n\n\n') print('\n\n\n\n\n', 'text2', text2, '\n\n\n\n\n') print('\n\n\n\n\n', 'text3', text3, 
'\n\n\n\n\n') print('\n\n\n\n\n', 'text4', text4, '\n\n\n\n\n') print('\n\n\n\n\n', 'text5', text5, '\n\n\n\n\n') print('\n\n\n\n\n', 'text6', text6, '\n\n\n\n\n') print('\n\n\n\n\n', 'text7', text7, '\n\n\n\n\n') print('\n\n\n\n\n', 'text8', text8, '\n\n\n\n\n') print('upper_name', upper_name) print('lower_name', lower_name) print('is_name_upper', is_name_upper) print('is_name_lower', is_name_lower) print('is_big_letters_upper', is_big_letters_upper) print('is_small_letters_lower', is_small_letters_lower) print('stringed_integer', stringed_integer) print('stringed_float', stringed_float) print('converted_int', converted_int) print('converted_float', converted_float) print('converted_string', converted_string) print('is_digit', is_digit) print('edges', edges) print('body', body) print('evens', evens) print('odds', odds)
[ 5239, 796, 705, 7061, 198, 21944, 273, 25930, 338, 37913, 30072, 12838, 286, 21942, 11, 48661, 290, 1842, 5679, 262, 27806, 286, 11320, 3254, 73, 11025, 11, 281, 13537, 38309, 5295, 284, 1234, 465, 4301, 1613, 2157, 683, 13, 887, 465, ...
2.715984
1,514
from typing import Any, Dict import argparse import numpy as np import torch import torch.nn as nn import torch.nn.functional as F FC1_DIM = 1024 FC2_DIM = 128
[ 6738, 19720, 1330, 4377, 11, 360, 713, 198, 11748, 1822, 29572, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 198, 4851, ...
2.910714
56
from pygame import Surface, font from .basewidget import BaseWidget from frontend import Renderer, WidgetHandler
[ 6738, 12972, 6057, 1330, 20321, 11, 10369, 198, 6738, 764, 12093, 413, 17484, 1330, 7308, 38300, 198, 6738, 2166, 437, 1330, 28703, 11882, 11, 370, 17484, 25060, 628 ]
4.071429
28
import numpy as np import tensorflow as tf """ Do an MNIST classification line by line by LSTM """ (x_train, y_train), \ (x_test, y_test) = tf.keras.datasets.mnist.load_data() x_train, x_test = x_train/255.0, x_test/255.0 model = tf.keras.Sequential() model.add(tf.keras.layers.LSTM(128, input_shape=(None, 28))) #model.add(tf.keras.layers.BatchNormalization()) model.add(tf.keras.layers.Dense(10)) model.add(tf.keras.layers.Activation("softmax")) model.summary() model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer="sgd", metrics=["accuracy"]) model.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=100, epochs=100)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 11192, 273, 11125, 355, 48700, 198, 198, 37811, 198, 5211, 281, 29060, 8808, 17923, 1627, 416, 1627, 416, 406, 2257, 44, 198, 37811, 198, 198, 7, 87, 62, 27432, 11, 331, 62, 27432, 828, 3467,...
2.279743
311
# coding: utf-8 # In[1]: import numpy as np import pandas as pd import os from random import shuffle from tqdm import tqdm DATA_DIR = '../input/amazon/' TRAIN_TIF_DIR = DATA_DIR + 'train-tif/' TRAIN_CSV = DATA_DIR + 'train.csv' TEST_TIF_DIR = DATA_DIR + 'test-tif/' IMG_SIZE = 100 LR = 1e-3 MODEL_NAME = 'amazon=-{}-{}.model'.format(LR, '2conv-basic') CLOUD_COVER_LABELS = [ 'clear', 'cloudy', 'haze', 'partly_cloudy'] # read our data and take a look at what we are dealing with train_csv = pd.read_csv(TRAIN_CSV) train_csv.head() tags = pd.DataFrame() for label in CLOUD_COVER_LABELS: tags[label] = train_csv.tags.apply(lambda x: np.where(label in x, 1, 0)) train_csv = pd.concat([train_csv, tags], axis=1) # In[17]: pd.concat([train_csv[train_csv.clear == 1].sample(n=7251), train_csv[train_csv.cloudy == 1].sample(n=7251), train_csv[train_csv.haze == 1], train_csv[train_csv.partly_cloudy == 1].sample(n=7251)], axis=0, ignore_index=True)
[ 198, 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 2, 554, 58, 16, 5974, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292, 355, 279, 67, 198, 11748, 28686, 198, 6738, 4738, 1330, 36273, 198, 6738, 256, 80, 36020, 1330, ...
2.256881
436
"""Base support for POSIX-like platforms.""" import py, os, sys from rpython.translator.platform import Platform, log, _run_subprocess import rpython rpydir = str(py.path.local(rpython.__file__).join('..'))
[ 37811, 14881, 1104, 329, 28069, 10426, 12, 2339, 9554, 526, 15931, 198, 198, 11748, 12972, 11, 28686, 11, 25064, 198, 198, 6738, 374, 29412, 13, 7645, 41880, 13, 24254, 1330, 19193, 11, 2604, 11, 4808, 5143, 62, 7266, 14681, 198, 198, ...
2.957746
71
import glob import bs4 import gzip import pickle import re import os from concurrent.futures import ProcessPoolExecutor as PPE import json from pathlib import Path from hashlib import sha256 import shutil Path('json').mkdir(exist_ok=True) #urls = [sha256(bytes(v, 'utf8')).hexdigest() for v in json.load(fp=open('./hash_url.json')).values()] #fns = [f'./htmls/{url}' for url in urls] import random files = glob.glob('./htmls/*') random.shuffle(files) args = {} for index, fn in enumerate(files): key = index%8 if args.get(key) is None: args[key] = [] args[key].append(fn) args = [(key,fns) for key,fns in args.items()] #[pmap(arg) for arg in args] with PPE(max_workers=8) as exe: exe.map(pmap, args)
[ 11748, 15095, 198, 11748, 275, 82, 19, 220, 198, 198, 11748, 308, 13344, 198, 11748, 2298, 293, 198, 11748, 302, 198, 11748, 28686, 198, 6738, 24580, 13, 69, 315, 942, 1330, 10854, 27201, 23002, 38409, 355, 350, 11401, 198, 11748, 33918...
2.486301
292
#!/usr/bin/python # # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from google.datacatalog_connectors.apache_atlas import scrape
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 198, 2, 15069, 12131, 3012, 11419, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287...
3.708791
182
import functools import json from os.path import abspath, dirname, exists, join from typing import Dict, Sequence import numpy as np import pandas as pd import torch from pymatgen.core import Composition from torch.utils.data import Dataset def collate_batch(dataset_list): """ Collate a list of data and return a batch for predicting crystal properties. Parameters ---------- dataset_list: list of tuples for each data point. (atom_fea, nbr_fea, nbr_fea_idx, target) atom_fea: torch.Tensor shape (n_i, atom_fea_len) nbr_fea: torch.Tensor shape (n_i, M, nbr_fea_len) self_fea_idx: torch.LongTensor shape (n_i, M) nbr_fea_idx: torch.LongTensor shape (n_i, M) target: torch.Tensor shape (1, ) cif_id: str or int Returns ------- N = sum(n_i); N0 = sum(i) batch_atom_weights: torch.Tensor shape (N, 1) batch_atom_fea: torch.Tensor shape (N, orig_atom_fea_len) Atom features from atom type batch_self_fea_idx: torch.LongTensor shape (N, M) Indices of mapping atom to copies of itself batch_nbr_fea_idx: torch.LongTensor shape (N, M) Indices of M neighbors of each atom crystal_atom_idx: list of torch.LongTensor of length N0 Mapping from the crystal idx to atom idx target: torch.Tensor shape (N, 1) Target value for prediction batch_comps: list batch_ids: list """ # define the lists batch_atom_weights = [] batch_atom_fea = [] batch_self_fea_idx = [] batch_nbr_fea_idx = [] crystal_atom_idx = [] batch_targets = [] batch_cry_ids = [] cry_base_idx = 0 for i, (inputs, target, *cry_ids) in enumerate(dataset_list): atom_weights, atom_fea, self_fea_idx, nbr_fea_idx = inputs # number of atoms for this crystal n_i = atom_fea.shape[0] # batch the features together batch_atom_weights.append(atom_weights) batch_atom_fea.append(atom_fea) # mappings from bonds to atoms batch_self_fea_idx.append(self_fea_idx + cry_base_idx) batch_nbr_fea_idx.append(nbr_fea_idx + cry_base_idx) # mapping from atoms to crystals crystal_atom_idx.append(torch.tensor([i] * n_i)) # batch the targets and ids 
batch_targets.append(target) batch_cry_ids.append(cry_ids) # increment the id counter cry_base_idx += n_i return ( ( torch.cat(batch_atom_weights, dim=0), torch.cat(batch_atom_fea, dim=0), torch.cat(batch_self_fea_idx, dim=0), torch.cat(batch_nbr_fea_idx, dim=0), torch.cat(crystal_atom_idx), ), tuple(torch.stack(b_target, dim=0) for b_target in zip(*batch_targets)), *zip(*batch_cry_ids), )
[ 11748, 1257, 310, 10141, 198, 11748, 33918, 198, 6738, 28686, 13, 6978, 1330, 2352, 6978, 11, 26672, 3672, 11, 7160, 11, 4654, 198, 6738, 19720, 1330, 360, 713, 11, 45835, 198, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 19798, 292,...
2.196094
1,280
import unittest from dq import util
[ 11748, 555, 715, 395, 198, 198, 6738, 288, 80, 1330, 7736, 628 ]
3.166667
12
""" PermCheck Check whether array A is a permutation. https://codility.com/demo/results/demoANZ7M2-GFU/ Task description A non-empty zero-indexed array A consisting of N integers is given. A permutation is a sequence containing each element from 1 to N once, and only once. For example, array A such that: A[0] = 4 A[1] = 1 A[2] = 3 A[3] = 2 is a permutation, but array A such that: A[0] = 4 A[1] = 1 A[2] = 3 is not a permutation, because value 2 is missing. The goal is to check whether array A is a permutation. Write a function: def solution(A) that, given a zero-indexed array A, returns 1 if array A is a permutation and 0 if it is not. For example, given array A such that: A[0] = 4 A[1] = 1 A[2] = 3 A[3] = 2 the function should return 1. Given array A such that: A[0] = 4 A[1] = 1 A[2] = 3 the function should return 0. Assume that: N is an integer within the range [1..100,000]; each element of array A is an integer within the range [1..1,000,000,000]. Complexity: expected worst-case time complexity is O(N); expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments). Elements of input arrays can be modified. """
[ 37811, 198, 198, 5990, 76, 9787, 198, 9787, 1771, 7177, 317, 318, 257, 9943, 7094, 13, 198, 5450, 1378, 19815, 879, 13, 785, 14, 9536, 78, 14, 43420, 14, 9536, 78, 1565, 57, 22, 44, 17, 12, 21713, 52, 14, 198, 198, 25714, 6764, ...
2.855835
437
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 2, 15069, 357, 66, 8, 1584, 11, 33448, 11, 18650, 290, 14, 273, 663, 29116, 13, 220, 1439, 2489, 10395, 13, 198, 2, 770, 3788, 318, 10668, 12, 36612, 284, 345, 739, 262, 14499, 2448, 33532, 1...
3.097561
164
'Author: Aimore Resende Riquetti Dutra' '''email: aimorerrd@hotmail.com''' # -------------------------------------------------------------------------------------------------- # # This code can run 4 different models of Reinforcement Learning: # Q-Learning (QL), DQN, SRL (DSRL), SRL+CS(DSRL_object_near) and some other variations of SRL # The setting for each run can be set at the end of the code # It can load and save the models in Excel form # There are some pre-defined environments, but you can create your own # Press G to get intermediate Graphs and P to stop # -------------------------------------------------------------------------------------------------- # import Class import pprint import random import sys import numpy as np import pygame # from pyglet import clock import pandas as pd import time import json from time import sleep import math import matplotlib.pyplot as plt import os import glob ## Comment this part if not using DQN model: # import keras # from keras.models import Sequential # from keras.layers import Dense, Activation, Flatten # from keras.models import model_from_json # from keras.optimizers import sgd # from keras.utils import plot_model # import tensorflow as tf # from keras.backend.tensorflow_backend import set_session # config = tf.ConfigProto() # config.gpu_options.per_process_gpu_memory_fraction = 0.3 # set_session(tf.Session(config=config)) # ------ environments ------ # region COLOR DEFINITION explore_set = set() explore_dict = dict() white = (255, 255, 255) black = (0, 0, 0) grey = (80, 80, 80) red = (255, 0, 0) blue = (0, 0, 255) green = (0, 255, 0) yellow = (250, 250, 0) pink = (250, 105, 180) # endregion # region PANDAS DEFINITION pd.set_option('display.max_columns', None) pd.set_option('display.large_repr', 'info') desired_width = 180 pd.set_option('display.width', desired_width) pd.set_option('precision', 4) # endregion np.random.seed(123) # For reproducibility pygame.init() # Pygame initialialization pp = 
pprint.PrettyPrinter(indent=4) actions = ['up', 'down', 'right', 'left'] actions_dict = {'up':0, 'down':1, 'right':2, 'left':3} p_keys = [pygame.K_w, pygame.K_a, pygame.K_s, pygame.K_d] # clock.tick(20) def pop(self): '''Removes a layer instance on top of the layer stack. ''' while self.outputs: self.layers.pop() if not self.layers: self.outputs = [] self.inbound_nodes = [] self.outbound_nodes = [] else: self.layers[-1].outbound_nodes = [] self.outputs = [self.layers[-1].output] self.built = False # region REWARDS negative_reward = 5 # Negative Reward positive_reward = 1 # Positive Reward step_reward = 0 # Reward received by each step # endregion # ------ environments configuration (till line 640) ------ # region TEXT FONTS DEFINITION smallfont = pygame.font.SysFont('comicsansms', 13) smallfont_act = pygame.font.SysFont('arial', 13) mediumfont_act = pygame.font.SysFont('arial', 18, bold=True) pygame.font.init() # endregion # region DISPLAY FUNCTIONS # endregion # region CREATE OBJ_LIST FROM STATE AND RELATIONSHIP LIST BETWEEN AGENT AND OBJECTS ''' CREATE obj_list - FROM env ''' ''' CREATE A RELATIONSHIP LIST BETWEEN AGENT AND OBJECTS - FROM obj_list ''' # endregion # region DRAW OBJECTS x_zero_screen = 50 y_zero_screen = 180 size_obj = 37 # endregion # region CREATE THE STATE FROM THE ENVIRONMENT # endregion # region ENVIRONMENT CONFIGURATION # endregion # region SAVE - LOAD - CREATE # endregion # ------ RL algorithms (till line 1030) ------ # region DQN - CONFIGURATIONS # alfa = 1 # Learning Rate gamma = 0.9 # Temporal Discount Factor ''' PROGRAM START ''' __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) # -------------------------------------------------------------------------------------------------- # ''' SELECT PARAMETERS TO RUN THE SOFTWARE ''' # environment configuration Env = 11 Alg_list = ["QL", "DSRL", "DSRL_object_near", "DQN", "DSRL_dist", "DSRL_dist_type", "DSRL_dist_type_near", "DSRL_dist_type_near_propNeg", 
"DSRL_object"] Alg = Alg_list[2] # Select the algorithm to be used Learn = False # To update its knowledge Load = True # To load a learned model Load_path = "/Results/Train/Env_11/Train_Env_11_DSRL 02 41 20 05-05-21" # algorithm configuration Samples = 2 # Usually 10 samples (repeat 100 episodes for 10 times) Print = True # Print some info in the terminal Auto = True # Agent moves Automatic or if False it moves by pressing the Spacebar key Server = False # If running in the server since # change Prob to 1 for probe training?? Prob = 0.3 # Probability to make a random move (exploration rate) Cond_to_end = "max_steps" # Choose from below (there are 4) Save = False # Save the model speed = 0.05 # seconds per frame # Cond_to_end = "max_steps" # Cond_to_end = "coll_all" # Cond_to_end = "only_negative" Episodes = 500 # Usually 1000 or 100 # region DQN Model Configurations: # max_memory_list = [5, 5, 5, 30, 30, 30, 100, 100, 100] # hidden_size_list = [5, 30, 270, 5, 30, 270, 5, 30, 270] # batch_size_list = [1, 1, 1, 10, 10, 10, 32, 32, 32] max_memory_list = [100, 100, 100, 300, 300, 300, 900, 900, 900] hidden_size_list = [5, 10, 15, 5, 10, 15, 5, 10, 15] batch_size_list = [32, 32, 32, 32, 32, 32, 32, 32, 32] optimizer_list = ["adam", "rms_opt"] n_actions = 4 # [move_up, move_down, move_left, move_right] # endregion Net_comb_param = 4 # ------------------------------------------------------------------------------------------- # run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save) # ------------------------------------------------------------------------------------------- # ''' REPEAT DQN Net_Comb_Param ''' # for i in range(9): # Net_comb_param = i # run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save) ''' REPEAT Alg for a list of Env ''' # env_list = [2,3] # for Env in env_list: # run(Env, Alg, Learn, Load, Print, Auto, Episodes, 
Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save) ''' Alg_list for Env_list ''' # env_list = [2,3] # alg_list = ["QL", "DSRL", "DSRL_object_near", "DQN"] # for Env in env_list: # for Alg in alg_list: # run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
[ 6, 13838, 25, 317, 9401, 1874, 38396, 371, 1557, 24851, 360, 35076, 6, 201, 198, 7061, 6, 12888, 25, 4031, 11934, 4372, 31, 8940, 4529, 13, 785, 7061, 6, 201, 198, 2, 16529, 3880, 438, 1303, 201, 198, 2, 770, 2438, 460, 1057, 604,...
2.526812
2,760
import logging
from typing import Tuple

import bpy
from mathutils import Vector

from .object import get_objs

logger = logging.getLogger(__name__)


################################################################################################
# Methods
#
# ==============================================================================================
def compute(self):
    """Compute the scene bounding box values.

    Walks every (mesh) object returned by get_objs, transforms the eight
    corners of each object's local bounding box into world space, and widens
    the stored min/max extents accordingly. The center is recomputed only
    when at least one object was found.
    """
    objs = get_objs(self.scene,
                    exclude_collections=self.exclude_collections,
                    mesh_only=True)
    logger.debug("Found %i objects in scene %s", len(objs), self.scene.name)
    for obj in objs:
        bbox_corners = obj.bound_box
        for corner_idx in range(8):
            # Local bbox corner -> world space.
            world_pt = obj.matrix_world @ Vector(bbox_corners[corner_idx])
            self.x_min = min(self.x_min, world_pt[0])
            self.x_max = max(self.x_max, world_pt[0])
            self.y_min = min(self.y_min, world_pt[1])
            self.y_max = max(self.y_max, world_pt[1])
            self.z_min = min(self.z_min, world_pt[2])
            self.z_max = max(self.z_max, world_pt[2])
    if objs:
        self.center = Vector(((self.x_max + self.x_min) / 2,
                              (self.y_max + self.y_min) / 2,
                              (self.z_max + self.z_min) / 2))
    logger.debug(str(self))


# ==============================================================================================
def get_min_vector(self):
    """Get minimum axis."""
    return Vector((self.x_min, self.y_min, self.z_min))


# ==============================================================================================
def get_max_vector(self):
    """Get maximum axis."""
    return Vector((self.x_max, self.y_max, self.z_max))


################################################################################################
# Builtin methods
#
# ==============================================================================================
[ 198, 11748, 18931, 198, 6738, 19720, 1330, 309, 29291, 198, 198, 11748, 275, 9078, 198, 6738, 10688, 26791, 1330, 20650, 198, 198, 6738, 764, 15252, 1330, 651, 62, 672, 8457, 198, 198, 6404, 1362, 796, 18931, 13, 1136, 11187, 1362, 7, ...
2.535443
790
# coding=utf-8 # Copyright 2019 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PPO in JAX. Notation: B, scalar - batch size T, scalar - number of time-steps in a trajectory, or the value of the padded time-step dimension. OBS, tuple - shape of a singular observation from the environment. Ex: For CartPole-v0 this is (4,) and Pong-v0 it's (210, 160, 3) A, scalar - Number of actions, assuming a discrete space. Policy and Value function signatures: Policy Function :: [B, T] + OBS -> [B, T, A] Value Function :: [B, T] + OBS -> [B, T, 1] Policy and Value Function :: [B, T] + OBS -> ([B, T, A], [B, T, 1]) i.e. the policy net should take a batch of *trajectories* and at each time-step in each batch deliver a probability distribution over actions. NOTE: It doesn't return logits, rather the expectation is that it returns log-probabilities instead. NOTE: The policy and value functions need to take care to not take into account future time-steps while deciding the actions (or value) for the current time-step. Policy and Value Function produces a tuple of the expected output of a policy function and a value function. 
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import pickle import time from absl import logging import gym from jax import grad from jax import jit from jax import lax from jax import numpy as np from jax import random as jax_random import numpy as onp from tensor2tensor.envs import env_problem from tensor2tensor.envs import env_problem_utils from tensor2tensor.trax import jaxboard from tensor2tensor.trax import layers from tensor2tensor.trax import optimizers as trax_opt from tensor2tensor.trax import trax from tensorflow.io import gfile DEBUG_LOGGING = False GAMMA = 0.99 LAMBDA = 0.95 EPSILON = 0.1 EPOCHS = 50 # 100 NUM_OPTIMIZER_STEPS = 100 PRINT_EVERY_OPTIMIZER_STEP = 20 BATCH_TRAJECTORIES = 32 def policy_and_value_net(rng_key, batch_observations_shape, num_actions, bottom_layers_fn=None, two_towers=True): """A policy and value net function.""" # Layers. # Now, with the current logits, one head computes action probabilities and the # other computes the value function. # NOTE: The LogSoftmax instead of the Softmax because of numerical stability. net = None if not two_towers: tower = [] if bottom_layers_fn is None else bottom_layers_fn() tower.extend([ layers.Branch( layers.Serial(layers.Dense(num_actions), layers.LogSoftmax()), layers.Dense(1)) ]) net = layers.Serial(*tower) else: tower1 = [] if bottom_layers_fn is None else bottom_layers_fn() tower2 = [] if bottom_layers_fn is None else bottom_layers_fn() tower1.extend([layers.Dense(num_actions), layers.LogSoftmax()]) tower2.extend([layers.Dense(1)]) net = layers.Branch( layers.Serial(*tower1), layers.Serial(*tower2), ) assert net return net.initialize(batch_observations_shape, rng_key), net # Should this be collect 'n' trajectories, or # Run the env for 'n' steps and take completed trajectories, or # Any other option? # TODO(afrozm): Replace this with EnvProblem? 
def collect_trajectories(env,
                         policy_fun,
                         num_trajectories=1,
                         policy=env_problem_utils.CATEGORICAL_SAMPLING,
                         max_timestep=None,
                         boundary=20,
                         epsilon=0.1,
                         reset=True,
                         rng=None):
  """Collect trajectories with the given policy net and behaviour.

  Args:
    env: A gym env interface, for now this is not-batched.
    policy_fun: observations(B,T+1) -> log-probabs(B,T+1, A) callable.
    num_trajectories: int, number of trajectories.
    policy: string, "greedy", "epsilon-greedy", or "categorical-sampling",
      i.e. how to use the policy_fun to return an action.
    max_timestep: int or None, the index of the maximum time-step at which we
      return the trajectory, None for ending a trajectory only when the env
      returns done.
    boundary: int, boundary for padding, used in EnvProblem envs.
    epsilon: float, the epsilon for `epsilon-greedy` policy.
    reset: bool, true if we want to reset the envs. The envs are also reset if
      max_max_timestep is None or < 0.
    rng: jax rng, splittable.

  Returns:
    A tuple (trajectory, number of trajectories that are done), where
    trajectory is a list of (observation, action, reward) tuples and each
    element `i` is a tuple of numpy arrays with shapes:
      observation[i] = (B, T_i + 1)
      action[i] = (B, T_i)
      reward[i] = (B, T_i)
  """
  # Only EnvProblem-based environments are supported; the actual rollout is
  # delegated to the shared helper.
  assert isinstance(env, env_problem.EnvProblem)
  return env_problem_utils.play_env_problem_with_policy(
      env,
      policy_fun,
      num_trajectories=num_trajectories,
      max_timestep=max_timestep,
      boundary=boundary,
      policy_sampling=policy,
      eps=epsilon,
      reset=reset,
      rng=rng)


# This function can probably be simplified, ask how?
# Can we do something much simpler than lax.pad, maybe np.pad?
# Others?
def get_padding_value(dtype):
  """Returns the padding value given a dtype."""
  padding_value = None
  if dtype == np.uint8:
    padding_value = np.uint8(0)
  elif dtype == np.uint16:
    padding_value = np.uint16(0)
  elif dtype == np.float32 or dtype == np.float64:
    padding_value = 0.0
  else:
    padding_value = 0
  assert padding_value is not None
  return padding_value


# TODO(afrozm): Use np.pad instead and make jittable?
def pad_trajectories(trajectories, boundary=20):
  """Pad trajectories to a bucket length that is a multiple of boundary.

  Args:
    trajectories: list[(observation, actions, rewards)], where each
      observation is shaped (t+1,) + OBS and actions & rewards are shaped
      (t,), with the length of the list being B (batch size).
    boundary: int, bucket length, the actions and rewards are padded to
      integer multiples of boundary.

  Returns:
    tuple: (padding lengths, reward_mask, padded_observations, padded_actions,
      padded_rewards) where padded_observations is shaped (B, T+1) + OBS and
      padded_actions, padded_rewards & reward_mask are shaped (B, T).
      Where T is max(t) rounded up to an integer multiple of boundary.
      padded_length is how much padding we've added and reward_mask is 1s for
      actual rewards and 0s for the padding.
  """
  # Let's compute max(t) over all trajectories.
  t_max = max(r.shape[0] for (_, _, r) in trajectories)

  # t_max is rounded to the next multiple of `boundary`
  boundary = int(boundary)
  bucket_length = boundary * int(np.ceil(float(t_max) / boundary))

  # So all obs will be padded to t_max + 1 and actions and rewards to t_max.
  padded_observations = []
  padded_actions = []
  padded_rewards = []
  padded_lengths = []
  reward_masks = []
  for (o, a, r) in trajectories:
    # Determine the amount to pad, this holds true for obs, actions and
    # rewards.
    num_to_pad = bucket_length + 1 - o.shape[0]
    padded_lengths.append(num_to_pad)
    if num_to_pad == 0:
      padded_observations.append(o)
      padded_actions.append(a)
      padded_rewards.append(r)
      reward_masks.append(onp.ones_like(r, dtype=np.int32))
      continue

    # First pad observations.
    padding_config = [(0, num_to_pad, 0)]
    for _ in range(o.ndim - 1):
      padding_config.append((0, 0, 0))
    padding_config = tuple(padding_config)

    padding_value = get_padding_value(o.dtype)
    action_padding_value = get_padding_value(a.dtype)
    reward_padding_value = get_padding_value(r.dtype)

    padded_obs = lax.pad(o, padding_value, padding_config)
    padded_observations.append(padded_obs)

    # Now pad actions and rewards.
    assert a.ndim == 1 and r.ndim == 1
    padding_config = ((0, num_to_pad, 0),)
    padded_action = lax.pad(a, action_padding_value, padding_config)
    padded_actions.append(padded_action)
    padded_reward = lax.pad(r, reward_padding_value, padding_config)
    padded_rewards.append(padded_reward)

    # Also create the mask to use later.
    reward_mask = onp.ones_like(r, dtype=np.int32)
    reward_masks.append(lax.pad(reward_mask, 0, padding_config))

  return padded_lengths, np.stack(reward_masks), np.stack(
      padded_observations), np.stack(padded_actions), np.stack(padded_rewards)


# TODO(afrozm): JAX-ify this, this is too slow for pong.
def rewards_to_go(rewards, mask, gamma=0.99):
  r"""Computes rewards to go.

  Reward to go is defined as follows, the discounted reward that we have to
  yet collect, going forward from this point, i.e.:

  r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l})

  Args:
    rewards: np.ndarray of shape (B, T) of rewards.
    mask: np.ndarray of shape (B, T) of mask for the rewards.
    gamma: float, discount factor.

  Returns:
    rewards to go, np.ndarray of shape (B, T).
  """
  B, T = rewards.shape  # pylint: disable=invalid-name,unused-variable

  masked_rewards = rewards * mask  # (B, T)

  # We use the following recurrence relation, derived from the equation above:
  #
  # r2g[t+1] = (r2g[t] - r[t]) / gamma
  #
  # This means we'll need to calculate r2g[0] first and then r2g[1] and so on ..
  #
  # **However** this leads to overflows for long sequences: r2g[t] - r[t] > 0
  # and gamma < 1.0, so the division keeps increasing.
  #
  # So we just run the recurrence in reverse, i.e.
  #
  # r2g[t] = r[t] + (gamma*r2g[t+1])
  #
  # This is much better, but might have lost updates since the (small) rewards
  # at earlier time-steps may get added to a (very?) large sum.

  # Compute r2g_{T-1} at the start and then compute backwards in time.
  r2gs = [masked_rewards[:, -1]]

  # Go from T-2 down to 0.
  for t in reversed(range(T - 1)):
    r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))

  # The list should have length T.
  assert T == len(r2gs)

  # First we stack them in the correct way to make it (B, T), but these are
  # still from newest (T-1) to oldest (0), so then we flip it on time axis.
  return np.flip(np.stack(r2gs, axis=1), axis=1)


# TODO(afrozm): JAX-ify this, this is too slow for pong.
def deltas(predicted_values, rewards, mask, gamma=0.99):
  r"""Computes TD-residuals from V(s) and rewards.

  Where a `delta`, i.e. a td-residual is defined as:

  delta_{b,t} = r_{b,t} + \gamma * v_{b,t+1} - v_{b,t}.

  Args:
    predicted_values: ndarray of shape (B, T+1). NOTE: Expects axis 2 was
      squeezed. These represent V(s_bt) for b < B and t < T+1
    rewards: ndarray of shape (B, T) of rewards.
    mask: ndarray of shape (B, T) of mask for rewards.
    gamma: float, discount factor.

  Returns:
    ndarray of shape (B, T) of one-step TD-residuals.
  """
  # `d`s are basically one-step TD residuals.
  d = []
  _, T = rewards.shape  # pylint: disable=invalid-name
  for t in range(T):
    d.append(rewards[:, t] + (gamma * predicted_values[:, t + 1]) -
             predicted_values[:, t])

  return np.array(d).T * mask


def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):
  r"""Computes the GAE advantages given the one step TD-residuals.

  The formula for a GAE advantage estimator is as follows:

  A_{bt} = \sum_{l=0}^{\infty}(\gamma * \lambda)^{l}(\delta_{b,t+l}).

  Internally we just call rewards_to_go, since it is the same computation.

  Args:
    td_deltas: np.ndarray of shape (B, T) of one step TD-residuals.
    mask: np.ndarray of shape (B, T) of mask for the residuals. It maybe the
      case that the `td_deltas` are already masked correctly since they are
      produced by `deltas(...)`
    lambda_: float, lambda parameter for GAE estimators.
    gamma: float, lambda parameter for GAE estimators.

  Returns:
    GAE advantage estimates.
  """
  return rewards_to_go(td_deltas, mask, lambda_ * gamma)


def chosen_probabs(probab_observations, actions):
  """Picks out the probabilities of the actions along batch and time-steps.

  Args:
    probab_observations: ndarray of shape `[B, T+1, A]`, where
      probab_observations[b, t, i] contains the log-probability of action = i
      at the t^th time-step in the b^th trajectory.
    actions: ndarray of shape `[B, T]`, with each entry in [0, A) denoting
      which action was chosen in the b^th trajectory's t^th time-step.

  Returns:
    `[B, T]` ndarray with the log-probabilities of the chosen actions.
  """
  B, T = actions.shape  # pylint: disable=invalid-name
  assert (B, T + 1) == probab_observations.shape[:2]
  return probab_observations[np.arange(B)[:, None], np.arange(T), actions]


def compute_probab_ratios(p_new, p_old, actions, reward_mask):
  """Computes the probability ratios for each time-step in a trajectory.

  Args:
    p_new: ndarray of shape [B, T+1, A] of the log-probabilities that the
      policy network assigns to all the actions at each time-step in each
      batch using the new parameters.
    p_old: ndarray of shape [B, T+1, A], same as above, but using old policy
      network parameters.
    actions: ndarray of shape [B, T] where each element is from [0, A).
    reward_mask: ndarray of shape [B, T] masking over probabilities.

  Returns:
    probab_ratios: ndarray of shape [B, T], where
    probab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}
  """
  B, T = actions.shape  # pylint: disable=invalid-name
  assert (B, T + 1) == p_old.shape[:2]
  assert (B, T + 1) == p_new.shape[:2]

  logp_old = chosen_probabs(p_old, actions)
  logp_new = chosen_probabs(p_new, actions)

  assert (B, T) == logp_old.shape
  assert (B, T) == logp_new.shape

  # Since these are log-probabilities, we just subtract them.
  probab_ratios = np.exp(logp_new - logp_old) * reward_mask
  assert (B, T) == probab_ratios.shape
  return probab_ratios


def clipped_probab_ratios(probab_ratios, epsilon=0.2):
  return np.clip(probab_ratios, 1 - epsilon, 1 + epsilon)


def clipped_objective(probab_ratios, advantages, reward_mask, epsilon=0.2):
  return np.minimum(
      probab_ratios * advantages,
      clipped_probab_ratios(probab_ratios, epsilon=epsilon) *
      advantages) * reward_mask


def get_time(t1, t2=None):
  """Returns the milliseconds elapsed between t1 and t2 (or now)."""
  if t2 is None:
    t2 = time.time()
  return round((t2 - t1) * 1000, 2)


def approximate_kl(log_prob_new, log_prob_old, mask):
  """Computes the approximate KL divergence between the old and new log-probs.

  Args:
    log_prob_new: (B, T+1, A) log probs new
    log_prob_old: (B, T+1, A) log probs old
    mask: (B, T)

  Returns:
    Approximate KL.
  """
  diff = log_prob_old - log_prob_new
  # Cut the last time-step out.
  diff = diff[:, :-1]
  # Mask out the irrelevant part.
  diff *= mask[:, :, np.newaxis]  # make mask (B, T, 1)
  # Average on non-masked part.
  return np.sum(diff) / np.sum(mask)


def masked_entropy(log_probs, mask):
  """Computes the entropy for the given log-probs.

  Args:
    log_probs: (B, T+1, A) log probs
    mask: (B, T) mask.

  Returns:
    Entropy.
  """
  # Cut the last time-step out.
  lp = log_probs[:, :-1]
  # Mask out the irrelevant part.
  lp *= mask[:, :, np.newaxis]  # make mask (B, T, 1)
  p = np.exp(lp) * mask[:, :, np.newaxis]  # (B, T, 1)
  # Average on non-masked part and take negative.
  return -(np.sum(lp * p) / np.sum(mask))


def evaluate_policy(eval_env,
                    get_predictions,
                    boundary,
                    max_timestep=20000,
                    rng=None):
  """Evaluate the policy under each supported sampling scheme.

  Returns a dict mapping sampling-policy name -> average trajectory reward.
  """
  avg_rewards = {}
  for policy in [
      env_problem_utils.CATEGORICAL_SAMPLING, env_problem_utils.GUMBEL_SAMPLING,
      env_problem_utils.EPSILON_GREEDY
  ]:
    trajs, _ = env_problem_utils.play_env_problem_with_policy(
        eval_env,
        get_predictions,
        boundary=boundary,
        max_timestep=max_timestep,
        reset=True,
        policy_sampling=policy,
        rng=rng)
    avg_rewards[policy] = float(sum(
        np.sum(traj[2]) for traj in trajs)) / len(trajs)
  return avg_rewards


def maybe_restore_params(output_dir, policy_and_value_net_params):
  """Maybe restore the params from the checkpoint dir.

  Args:
    output_dir: Directory where saved model checkpoints are stored.
    policy_and_value_net_params: Default params, returned if model isn't found.

  Returns:
    triple (restore (bool), params, iter(int)) where iter is the epoch from
    which we restored the params, 0 is restore = False.
  """
  model_files = gfile.glob(os.path.join(output_dir, "model-??????.pkl"))
  if not model_files:
    return False, policy_and_value_net_params, 0

  model_file = sorted(model_files)[-1]
  model_file_basename = os.path.basename(model_file)  # model-??????.pkl
  # BUG FIX: `int(filter(...))` raises TypeError on Python 3, since `filter`
  # returns an iterator, not a string. Join the digit characters first.
  i = int("".join(filter(str.isdigit, model_file_basename)))
  with gfile.GFile(model_file, "rb") as f:
    policy_and_value_net_params = pickle.load(f)
  return True, policy_and_value_net_params, i


def training_loop(
    env=None,
    epochs=EPOCHS,
    policy_and_value_net_fun=None,
    policy_and_value_optimizer_fun=None,
    batch_size=BATCH_TRAJECTORIES,
    num_optimizer_steps=NUM_OPTIMIZER_STEPS,
    print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP,
    target_kl=0.01,
    boundary=20,
    max_timestep=None,
    max_timestep_eval=20000,
    random_seed=None,
    gamma=GAMMA,
    lambda_=LAMBDA,
    epsilon=EPSILON,
    c1=1.0,
    c2=0.01,
    output_dir=None,
    eval_every_n=1000,
    eval_env=None,
    done_frac_for_policy_save=0.5,
    enable_early_stopping=True,
    env_name=None,
):
  """Runs the training loop for PPO, with fixed policy and value nets."""
  assert env
  assert output_dir
  assert env_name

  gfile.makedirs(output_dir)

  # Create summary writers and history.
  train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "train"))
  timing_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "timing"))
  eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "eval"))

  train_sw.text("env_name", env_name)
  timing_sw.text("env_name", env_name)
  eval_sw.text("env_name", env_name)

  jax_rng_key = trax.get_random_number_generator_and_set_seed(random_seed)

  # Batch Observations Shape = [-1, -1] + OBS, because we will eventually call
  # policy and value networks on shape [B, T] + OBS
  batch_observations_shape = (-1, -1) + env.observation_space.shape

  assert isinstance(env.action_space, gym.spaces.Discrete)
  num_actions = env.action_space.n

  jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)

  # Initialize the policy and value network.
  policy_and_value_net_params, policy_and_value_net_apply = (
      policy_and_value_net_fun(key1, batch_observations_shape, num_actions))

  # Maybe restore the policy params. If there is nothing to restore, then
  # iteration = 0 and policy_and_value_net_params are returned as is.
  restore, policy_and_value_net_params, iteration = (
      maybe_restore_params(output_dir, policy_and_value_net_params))

  if restore:
    logging.info("Restored parameters from iteration [%d]", iteration)
    # We should start from the next iteration.
    iteration += 1

  policy_and_value_net_apply = jit(policy_and_value_net_apply)

  # Initialize the optimizers.
  policy_and_value_optimizer = (
      policy_and_value_optimizer_fun(policy_and_value_net_params))
  (policy_and_value_opt_state, policy_and_value_opt_update,
   policy_and_value_get_params) = policy_and_value_optimizer

  num_trajectories_done = 0
  last_saved_at = 0

  logging.info("Starting the PPO training loop.")
  for i in range(iteration, epochs):
    epoch_start_time = time.time()

    # Params we'll use to collect the trajectories.
    policy_and_value_net_params = policy_and_value_get_params(
        policy_and_value_opt_state)

    # A function to get the policy and value predictions.
    def get_predictions(observations, rng=None):
      """Returns log-probs, value predictions and key back."""
      key, key1 = jax_random.split(rng, num=2)

      log_probs, value_preds = policy_and_value_net_apply(
          observations, policy_and_value_net_params, rng=key1)

      return log_probs, value_preds, key

    # Evaluate the policy.
    policy_eval_start_time = time.time()
    if ((i + 1) % eval_every_n == 0) or (i == epochs - 1):
      jax_rng_key, key = jax_random.split(jax_rng_key, num=2)

      logging.vlog(1, "Epoch [% 6d] evaluating policy.", i)

      avg_reward = evaluate_policy(
          eval_env,
          get_predictions,
          boundary,
          max_timestep=max_timestep_eval,
          rng=key)
      for k, v in avg_reward.items():
        eval_sw.scalar("eval/mean_reward/%s" % k, v, step=i)
        logging.info("Epoch [% 6d] Policy Evaluation [%s] = %10.2f", i, k, v)
    policy_eval_time = get_time(policy_eval_start_time)

    trajectory_collection_start_time = time.time()
    logging.vlog(1, "Epoch [% 6d] collecting trajectories.", i)
    jax_rng_key, key = jax_random.split(jax_rng_key)
    trajs, num_done = collect_trajectories(
        env,
        policy_fun=get_predictions,
        num_trajectories=batch_size,
        max_timestep=max_timestep,
        boundary=boundary,
        rng=key,
        reset=(i == 0) or restore,
        epsilon=(10.0 / (i + 10.0)))  # this is a different epsilon.
    trajectory_collection_time = get_time(trajectory_collection_start_time)

    logging.vlog(1, "Collecting trajectories took %0.2f msec.",
                 trajectory_collection_time)

    avg_reward = float(sum(np.sum(traj[2]) for traj in trajs)) / len(trajs)
    max_reward = max(np.sum(traj[2]) for traj in trajs)
    min_reward = min(np.sum(traj[2]) for traj in trajs)

    train_sw.scalar("train/mean_reward", avg_reward, step=i)

    logging.vlog(1, "Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s",
                 avg_reward, max_reward, min_reward,
                 [float(np.sum(traj[2])) for traj in trajs])

    logging.vlog(1,
                 "Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]",
                 float(sum(len(traj[0]) for traj in trajs)) / len(trajs),
                 max(len(traj[0]) for traj in trajs),
                 min(len(traj[0]) for traj in trajs))
    logging.vlog(2, "Trajectory Lengths: %s", [len(traj[0]) for traj in trajs])

    padding_start_time = time.time()
    (_, reward_mask, padded_observations, padded_actions,
     padded_rewards) = pad_trajectories(
         trajs, boundary=boundary)
    padding_time = get_time(padding_start_time)

    logging.vlog(1, "Padding trajectories took %0.2f msec.",
                 get_time(padding_start_time))
    logging.vlog(1, "Padded Observations' shape [%s]",
                 str(padded_observations.shape))
    logging.vlog(1, "Padded Actions' shape [%s]", str(padded_actions.shape))
    logging.vlog(1, "Padded Rewards' shape [%s]", str(padded_rewards.shape))

    # Calculate log-probabilities and value predictions of the trajectories.
    # We'll pass these to the loss functions so as to not get recomputed.

    # NOTE:
    # There is a slight problem here, if the policy network contains
    # stochasticity in the log-probabilities (ex: dropout), then calculating
    # these again here is not going to be correct and should be done in the
    # collect function.

    log_prob_recompute_start_time = time.time()
    jax_rng_key, key = jax_random.split(jax_rng_key)
    log_probabs_traj, value_predictions_traj, _ = get_predictions(
        padded_observations, rng=key)
    log_prob_recompute_time = get_time(log_prob_recompute_start_time)

    # Some assertions.
    B, T = padded_actions.shape  # pylint: disable=invalid-name
    assert (B, T) == padded_rewards.shape
    assert (B, T) == reward_mask.shape
    assert (B, T + 1) == padded_observations.shape[:2]
    assert (B, T + 1) + env.observation_space.shape == padded_observations.shape

    # Linear annealing from 0.1 to 0.0
    # epsilon_schedule = epsilon if epochs == 1 else epsilon * (1.0 -
    #                                                           (i /
    #                                                            (epochs - 1)))

    # Constant epsilon.
    epsilon_schedule = epsilon

    # Compute value and ppo losses.
    jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
    logging.vlog(2, "Starting to compute P&V loss.")
    loss_compute_start_time = time.time()
    cur_combined_loss, cur_ppo_loss, cur_value_loss, entropy_bonus = (
        combined_loss(
            policy_and_value_net_params,
            log_probabs_traj,
            value_predictions_traj,
            policy_and_value_net_apply,
            padded_observations,
            padded_actions,
            padded_rewards,
            reward_mask,
            gamma=gamma,
            lambda_=lambda_,
            epsilon=epsilon_schedule,
            c1=c1,
            c2=c2,
            rng=key1))
    loss_compute_time = get_time(loss_compute_start_time)
    logging.vlog(
        1,
        "Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.",
        cur_combined_loss, cur_value_loss, cur_ppo_loss, entropy_bonus,
        get_time(loss_compute_start_time))

    jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
    logging.vlog(1, "Policy and Value Optimization")
    optimization_start_time = time.time()
    keys = jax_random.split(key1, num=num_optimizer_steps)
    for j in range(num_optimizer_steps):
      k1, k2, k3 = jax_random.split(keys[j], num=3)
      t = time.time()
      # Update the optimizer state.
      policy_and_value_opt_state = policy_and_value_opt_step(
          j,
          policy_and_value_opt_state,
          policy_and_value_opt_update,
          policy_and_value_get_params,
          policy_and_value_net_apply,
          log_probabs_traj,
          value_predictions_traj,
          padded_observations,
          padded_actions,
          padded_rewards,
          reward_mask,
          c1=c1,
          c2=c2,
          gamma=gamma,
          lambda_=lambda_,
          epsilon=epsilon_schedule,
          rng=k1)

      # Compute the approx KL for early stopping.
      new_policy_and_value_net_params = policy_and_value_get_params(
          policy_and_value_opt_state)

      log_probab_actions_new, _ = policy_and_value_net_apply(
          padded_observations, new_policy_and_value_net_params, rng=k2)

      approx_kl = approximate_kl(log_probab_actions_new, log_probabs_traj,
                                 reward_mask)

      early_stopping = enable_early_stopping and approx_kl > 1.5 * target_kl
      if early_stopping:
        logging.vlog(
            1, "Early stopping policy and value optimization at iter: %d, "
            "with approx_kl: %0.2f", j, approx_kl)
        # We don't return right-away, we want the below to execute on the last
        # iteration.

      t2 = time.time()
      if (((j + 1) % print_every_optimizer_steps == 0) or
          (j == num_optimizer_steps - 1) or early_stopping):
        # Compute and log the loss.
        (loss_combined, loss_ppo, loss_value, entropy_bonus) = (
            combined_loss(
                new_policy_and_value_net_params,
                log_probabs_traj,
                value_predictions_traj,
                policy_and_value_net_apply,
                padded_observations,
                padded_actions,
                padded_rewards,
                reward_mask,
                gamma=gamma,
                lambda_=lambda_,
                epsilon=epsilon_schedule,
                c1=c1,
                c2=c2,
                rng=k3))
        logging.vlog(1, "One Policy and Value grad desc took: %0.2f msec",
                     get_time(t, t2))
        logging.vlog(
            1, "Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->"
            " [%10.2f(%10.2f,%10.2f,%10.2f)]", cur_combined_loss, loss_combined,
            loss_value, loss_ppo, entropy_bonus)

      if early_stopping:
        break

    optimization_time = get_time(optimization_start_time)

    logging.vlog(
        1, "Total Combined Loss reduction [%0.2f]%%",
        (100 * (cur_combined_loss - loss_combined) / np.abs(cur_combined_loss)))

    # Save parameters every time we see the end of at least a fraction of batch
    # number of trajectories that are done (not completed -- completed includes
    # truncated and done).
    # Also don't save too frequently, enforce a minimum gap.
    # Or if this is the last iteration.
    policy_save_start_time = time.time()
    num_trajectories_done += num_done
    if (((num_trajectories_done >= done_frac_for_policy_save * batch_size)
         and (i - last_saved_at > eval_every_n)) or (i == epochs - 1)):
      logging.vlog(1, "Epoch [% 6d] saving model.", i)
      params_file = os.path.join(output_dir, "model-%06d.pkl" % i)
      with gfile.GFile(params_file, "wb") as f:
        pickle.dump(policy_and_value_net_params, f)
      # Reset this number.
      num_trajectories_done = 0
      last_saved_at = i
    policy_save_time = get_time(policy_save_start_time)

    epoch_time = get_time(epoch_start_time)

    logging.info(
        "Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined"
        " Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]", i, min_reward,
        max_reward, avg_reward, loss_combined, loss_value, loss_ppo,
        entropy_bonus)

    timing_dict = {
        "epoch": epoch_time,
        "policy_eval": policy_eval_time,
        "trajectory_collection": trajectory_collection_time,
        "padding": padding_time,
        "log_prob_recompute": log_prob_recompute_time,
        "loss_compute": loss_compute_time,
        "optimization": optimization_time,
        "policy_save": policy_save_time,
    }

    for k, v in timing_dict.items():
      timing_sw.scalar("timing/%s" % k, v, step=i)

    max_key_len = max(len(k) for k in timing_dict)
    timing_info_list = [
        "%s : % 10.2f" % (k.rjust(max_key_len + 1), v)
        for k, v in sorted(timing_dict.items())
    ]
    logging.info("Epoch [% 6d], Timings: \n%s", i, "\n".join(timing_info_list))

    # Reset restore.
    restore = False

    # Flush summary writers once in a while.
    if (i + 1) % 1000 == 0 or i == epochs - 1:
      train_sw.flush()
      timing_sw.flush()
      eval_sw.flush()
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 13130, 383, 309, 22854, 17, 51, 22854, 46665, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, ...
2.377276
13,017
""" WS-DAN models Hu et al., "See Better Before Looking Closer: Weakly Supervised Data Augmentation Network for Fine-Grained Visual Classification", arXiv:1901.09891 """ import logging import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import models.resnet as resnet from models.inception import inception_v3, BasicConv2d import models.coatnet as coatnet import random __all__ = ['WSDAN_CAL'] EPSILON = 1e-6 # Bilinear Attention Pooling
[ 37811, 198, 19416, 12, 35, 1565, 4981, 198, 38202, 2123, 435, 1539, 198, 1, 6214, 11625, 7413, 15616, 1012, 13416, 25, 28788, 306, 3115, 16149, 6060, 2447, 14374, 7311, 329, 17867, 12, 8642, 1328, 15612, 40984, 1600, 198, 283, 55, 452, ...
3.117647
153
#!/usr/bin/env python

import mirheo as mir

dt = 0.001

ranks = (1, 1, 1)
domain = (8, 16, 8)
force = (1.0, 0, 0)
density = 4

u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True)

# Solvent particles, uniformly seeded at the target number density.
pv = mir.ParticleVectors.ParticleVector('pv', mass=1)
ic = mir.InitialConditions.Uniform(number_density=density)
u.registerParticleVector(pv=pv, ic=ic)

dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind="DPD",
                                a=10.0, gamma=50.0, kBT=1.0, power=0.5)
u.registerInteraction(dpd)

# Two parallel plates normal to z, one unit inside each domain face.
plate_lo = mir.Walls.Plane("plate_lo", (0, 0, -1), (0, 0, 1))
plate_hi = mir.Walls.Plane("plate_hi", (0, 0, 1), (0, 0, domain[2] - 1))
u.registerWall(plate_lo, 0)
u.registerWall(plate_hi, 0)

vv = mir.Integrators.VelocityVerlet("vv")
frozen = u.makeFrozenWallParticles(pvName="plates",
                                   walls=[plate_lo, plate_hi],
                                   interactions=[dpd],
                                   integrator=vv,
                                   number_density=density)

u.setWall(plate_lo, pv)
u.setWall(plate_hi, pv)

# Both the solvent and the frozen wall particles interact with the solvent.
for p in (pv, frozen):
    u.setInteraction(dpd, p, pv)

# Drive the solvent with a constant body force along x.
vv_dp = mir.Integrators.VelocityVerlet_withConstForce("vv_dp", force)
u.registerIntegrator(vv_dp)
u.setIntegrator(vv_dp, pv)

sample_every = 2
dump_every = 1000
bin_size = (1., 1., 0.5)
u.registerPlugins(mir.Plugins.createDumpAverage('field', [pv],
                                                sample_every, dump_every,
                                                bin_size, ["velocities"],
                                                'h5/solvent-'))

u.run(7002)

# nTEST: walls.analytic.plates
# cd walls/analytic
# rm -rf h5
# mir.run --runargs "-n 2" ./plates.py
# mir.avgh5 xy velocities h5/solvent-0000[4-7].h5 | awk '{print $1}' > profile.out.txt
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 11748, 5720, 258, 78, 355, 5720, 198, 198, 28664, 796, 657, 13, 8298, 198, 198, 81, 2283, 220, 796, 357, 16, 11, 352, 11, 352, 8, 198, 27830, 796, 357, 23, 11, 1467, 11, 807...
2.244118
680
#!/usr/bin/python
# -*- coding: utf-8 -*-

import abc
import bs4
import functools

import utilities


def loadable(func_name):
  """Decorator for getters that require a load() upon first access.

  BUG FIX: the original body returned `inner` without ever defining it, which
  raises NameError the first time the decorator is applied. The standard
  lazy-load wrapper is reconstructed below: when the getter's backing
  attribute (`_<getter name>`) is still None, the loader method named by
  `func_name` is invoked before delegating to the getter.

  :type func_name: function
  :param func_name: class method that requires that load() be called if the
    class's _attribute value is None

  :rtype: function
  :return: the decorated class method.
  """
  def inner(func):
    @functools.wraps(func)
    def outer(self, *args, **kwargs):
      # Lazily populate the backing attribute on first access.
      if getattr(self, "_" + func.__name__) is None:
        getattr(self, func_name)()
      return func(self, *args, **kwargs)
    return outer
  return inner
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 450, 66, 198, 11748, 275, 82, 19, 198, 11748, 1257, 310, 10141, 198, 198, 11748, 20081, 198, 198, 4299, 3440, 540, 7,...
3.261905
126
"""p2 core http responses""" from wsgiref.util import FileWrapper from django.http import StreamingHttpResponse from p2.core.constants import ATTR_BLOB_MIME, ATTR_BLOB_SIZE_BYTES from p2.core.models import Blob
[ 37811, 79, 17, 4755, 2638, 9109, 37811, 198, 6738, 266, 45213, 557, 69, 13, 22602, 1330, 9220, 36918, 2848, 198, 198, 6738, 42625, 14208, 13, 4023, 1330, 43124, 43481, 31077, 198, 198, 6738, 279, 17, 13, 7295, 13, 9979, 1187, 1330, 51...
2.931507
73
# Generated by Django 3.0.6 on 2020-05-28 09:07 from django.conf import settings from django.db import migrations, models import django.db.models.deletion
[ 2, 2980, 515, 416, 37770, 513, 13, 15, 13, 21, 319, 12131, 12, 2713, 12, 2078, 7769, 25, 2998, 198, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14...
3.019231
52
""" Data: Temperature and Salinity time series from SIO Scripps Pier Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m) Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m) - Timestamp included beginning in 1990 """ # imports import sys,os import pandas as pd import numpy as np import matplotlib.pyplot as plt import datetime from scipy import signal import scipy.stats as ss import SIO_modules as SIO_mod from importlib import reload reload(SIO_mod) # read in temp and sal files sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27) temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26) ENSO_data = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx') ENSO_data_recent = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx') PDO_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1) path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/' # convert year, month, day columns to single DATE column sal_data['DATE'] = pd.to_datetime(sal_data[['YEAR', 'MONTH', 'DAY']]) temp_data['DATE'] = pd.to_datetime(temp_data[['YEAR', 'MONTH', 'DAY']]) ENSO_data_all = ENSO_data.append(ENSO_data_recent[323:], ignore_index = True) PDO_data['DATE'] = pd.to_datetime(PDO_data['Date'], format='%Y%m') # remove uncertain data(SURF_FLAG between 1 and 4), replace with NaN, then interpolate for i in range(0,len(sal_data['SURF_SAL_PSU'])): if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4): sal_data['SURF_SAL_PSU'][i] = np.nan for i in range(0,len(temp_data['SURF_TEMP_C'])): if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4): sal_data['SURF_SAL_PSU'][i] = np.nan # interpolate missing temp and sal data sal_data['SURF_SAL_PSU'] = 
sal_data['SURF_SAL_PSU'].interpolate() temp_data['SURF_TEMP_C'] = temp_data['SURF_TEMP_C'].interpolate() sal_data['SURF_SAL_PSU'][0] = sal_data['SURF_SAL_PSU'][1] # remove the average from the sal and temp data and create new columns sal_data['SURF_SAL_PSU_NOAVG'] = sal_data['SURF_SAL_PSU'] - sal_data['SURF_SAL_PSU'].mean() temp_data['SURF_TEMP_C_NOAVG'] = temp_data['SURF_TEMP_C'] - temp_data['SURF_TEMP_C'].mean() # remove trends from the sal and temp data and create new columns sal_fit = np.polyfit(sal_data.index,sal_data['SURF_SAL_PSU_NOAVG'],1) sal_fit_fn = np.poly1d(sal_fit) temp_fit = np.polyfit(temp_data.index,temp_data['SURF_TEMP_C_NOAVG'],1) temp_fit_fn = np.poly1d(temp_fit) sal_fit_value = sal_fit_fn(sal_data.index) temp_fit_value = temp_fit_fn(temp_data.index) sal_data['SURF_SAL_PSU_DETREND'] = sal_data['SURF_SAL_PSU_NOAVG'] - sal_fit_value temp_data['SURF_TEMP_C_DETREND'] = temp_data['SURF_TEMP_C_NOAVG'] - temp_fit_value sal_tri = sal_data['SURF_SAL_PSU_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean() temp_tri = temp_data['SURF_TEMP_C_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean() # # 1. FFT the SIO Data # t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_data['SURF_TEMP_C_DETREND']) # # 2. Apply butterworth filter to SIO data, with cutoff equal to nyquist freq of enso index # fs = 1 # sampling frequency, once per day # fc = 1/60 # cut-off frequency of the filter (cut off periods shorter than 60 days) # w = fc / (fs / 2) #normalize the frequency # b, a = signal.butter(4, w, 'low') # temp_output = signal.filtfilt(b, a, t_spec) # # 3. Inverse FFT of filtered SIO data # temp_ifft = np.fft.irfft(temp_output,n=len(temp_output)) # # 4. 
Subsample new SIO time series with same delta t as ENSO index (once per month) # temp_ifft_sampled = np.mean(temp_ifft[0:18750].reshape(-1, 30), axis=1) # temp_ifft_len = temp_ifft_sampled[0:618] # x = np.linspace(0,18770, 18770) # plt.figure() # plt.loglog(x, temp_ifft) # plt.show() # butterworth low pass filter for temperature and salinity fs = 1 # sampling frequency, once per day fc = 1/500 # cut-off frequency of the filter (cut off periods shorter than 500 days) w = fc / (fs / 2) #normalize the frequency b, a = signal.butter(4, w, 'low') temp_output = signal.filtfilt(b, a, temp_tri) sal_output = signal.filtfilt(b, a, sal_tri) temp_sampled = np.mean(temp_output[0:37530].reshape(-1, 30), axis=1) #length = 1251 # create dataframe with spectra for each variable spectra_temp_df = pd.DataFrame(columns = ['Temp_freq', 'Temp_spec', 'Temp_fft']) spectra_sal_df = pd.DataFrame(columns = ['Sal_freq', 'Sal_spec', 'Sal_fft']) spectra_PDO_df = pd.DataFrame(columns = ['PDO_freq', 'PDO_spec', 'PDO_fft']) spectra_ENSO_df = pd.DataFrame(columns = ['ENSO_freq', 'ENSO_spec', 'ENSO_fft']) # for coherence, start all records at 1916-01-01 # ENSO data [20:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1254] # Temp data [10:] 1916-09-01 onward, daily // ends 2019-05-31 # PDO data [752:] 1916-09-01 onward, monthly// ends now, thorugh 2019-05-01 [:1985] # compute spectral variables for each variable for j in range(0,4): data_sets = [temp_sampled, sal_data['SURF_SAL_PSU_DETREND'], PDO_data['Value'][743:], ENSO_data_all['VALUE'][14:]] freq, spec, spec_amp, fft, delt, freq_T, freq_nyquist = SIO_mod.var_fft(data_sets[j]) if j == 0: spectra_temp_df['Temp_freq'] = freq spectra_temp_df['Temp_spec'] = spec spectra_temp_df['Temp_fft'] = fft if j == 1: spectra_sal_df['Sal_freq'] = freq spectra_sal_df['Sal_spec'] = spec spectra_sal_df['Sal_fft'] = fft if j == 2: spectra_PDO_df['PDO_freq'] = freq spectra_PDO_df['PDO_spec'] = spec spectra_PDO_df['PDO_fft'] = fft if j == 3: 
spectra_ENSO_df['ENSO_freq'] = freq spectra_ENSO_df['ENSO_spec'] = spec spectra_ENSO_df['ENSO_fft'] = fft n_av = 5 # define terms to compute coherence between temp and ENSO t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av) e_spec_b,e_phase_b,e_freq_av_b,count=band_average(spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_freq'],n_av) e_fft_star = np.conj(spectra_ENSO_df['ENSO_fft']) cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,e_fft_star,spectra_ENSO_df['ENSO_freq'],n_av) coh_sq2=cospec_amp2**2/(t_spec_b*e_spec_b) # define colors t_color = 'cadetblue' s_color = 'darkslateblue' p_color = 'seagreen' e_color = 'steelblue' freq_ann = 2*np.pi/365.25 # plot the coherence and phase between ENSO and temperature tstr = 'SIO Temperature and ENSO Index \nCoherence and Phase' im_name = 'SIO_TempENSO_CoherencePhase.jpg' NR = 2; NC = 1 fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7)) axes[0].semilogx(freq_av2,coh_sq2, color = e_color) axes[0].set_xlabel('$\omega$ (radians/day)') axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{ENSO}$') axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes) axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes) axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes) axes[1].semilogx(freq_av2, cospec_phase2, color = e_color) axes[1].set_xlabel('$\omega$ (radians/day)') axes[1].set_ylabel('Phase $\it{T}$-$\it{ENSO}$, degrees') axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5) 
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes) axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes) axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes) fig.suptitle(tstr) # fig.tight_layout(pad=2.0) plt.savefig(path_out + im_name) plt.show() n_av = 5 # define terms to compute coherence between temp and ENSO #t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals #t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av) p_spec_b,p_phase_b,p_freq_av_b,count=band_average(spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_freq'],n_av) p_fft_star = np.conj(spectra_PDO_df['PDO_fft']) cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,p_fft_star,spectra_PDO_df['PDO_freq'],n_av) coh_sq2=cospec_amp2**2/(t_spec_b*p_spec_b) # plot the coherence and phase between ENSO and temperature tstr = 'SIO Temperature and PDO Index \nCoherence and Phase' im_name = 'SIO_TempPDO_CoherencePhase.jpg' NR = 2; NC = 1 fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7)) axes[0].semilogx(freq_av2,coh_sq2, color = p_color) axes[0].set_xlabel('$\omega$ (radians/day)') axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{PDO}$') axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes) axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes) axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5) axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes) axes[1].semilogx(freq_av2, 
cospec_phase2, color = p_color) axes[1].set_xlabel('$\omega$ (radians/day)') axes[1].set_ylabel('Phase $\it{T}$-$\it{PDO}$, degrees') axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes) axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes) axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5) axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes) fig.suptitle(tstr) # fig.tight_layout(pad=2.0) plt.savefig(path_out + im_name) plt.show()
[ 37811, 198, 198, 6601, 25, 34467, 290, 4849, 6269, 640, 2168, 422, 311, 9399, 1446, 14602, 82, 13762, 198, 220, 220, 220, 4849, 6269, 25, 8630, 287, 48189, 379, 262, 4417, 31034, 15, 13, 20, 76, 8, 290, 379, 6795, 31034, 20, 76, 8...
2.210482
4,751
from collections import namedtuple as Struct from sklearn.model_selection import GroupShuffleSplit, ShuffleSplit DataSplitConfig = Struct('DataSplitConfig', ['validation_size', 'test_size', 'random_seed']) DEFAULT_SPLIT_CONFIG = DataSplitConfig(0.2, 0.2, 1337)
[ 6738, 17268, 1330, 3706, 83, 29291, 355, 32112, 198, 6738, 1341, 35720, 13, 19849, 62, 49283, 1330, 4912, 2484, 18137, 41205, 11, 911, 18137, 41205, 198, 198, 6601, 41205, 16934, 796, 32112, 10786, 6601, 41205, 16934, 3256, 37250, 12102, ...
3.207317
82
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch.nn as nn from mmcv.cnn import ConvModule from mmcv.runner import BaseModule from .make_divisible import make_divisible
[ 2, 15069, 357, 66, 8, 4946, 44, 5805, 397, 13, 1439, 2489, 10395, 13, 198, 11748, 8085, 33967, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 6738, 8085, 33967, 13, 66, 20471, 1330, 34872, 26796, 198, 6738, 8085, 33967, 13, 16737, ...
3.288136
59
from django.contrib import admin from .models import Images,Comments,Profile # Register your models here. admin.site.site_header='InstaPost Admin' admin.site.site_title='InstaPost Admin Dashboard' admin.site.register(Images,ImageInline) admin.site.register(Profile)
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 764, 27530, 1330, 5382, 11, 23903, 11, 37046, 198, 2, 17296, 534, 4981, 994, 13, 198, 220, 220, 220, 220, 198, 220, 220, 220, 220, 198, 28482, 13, 15654, 13, 15654, 62, 2567...
3.123596
89
import numpy as np _mandelbruh_GLOBAL_VARS = {}
[ 11748, 299, 32152, 355, 45941, 198, 198, 62, 22249, 417, 65, 622, 71, 62, 8763, 9864, 1847, 62, 53, 27415, 796, 23884, 198 ]
2.130435
23
""" recognize face landmark """ import json import os import requests import numpy as np FACE_POINTS = list(range(0, 83)) JAW_POINTS = list(range(0, 19)) LEFT_EYE_POINTS = list(range(19, 29)) LEFT_BROW_POINTS = list(range(29, 37)) MOUTH_POINTS = list(range(37, 55)) NOSE_POINTS = list(range(55, 65)) RIGHT_EYE_POINTS = list(range(65, 75)) RIGHT_BROW_POINTS = list(range(75, 83)) LEFT_FACE = list(range(0, 10)) + list(range(29, 34)) RIGHT_FACE = list(range(9, 19)) + list(range(75, 80)) JAW_END = 19 FACE_START = 0 FACE_END = 83 OVERLAY_POINTS = [ LEFT_FACE, RIGHT_FACE, JAW_POINTS, ]
[ 37811, 198, 26243, 1096, 1986, 20533, 198, 37811, 198, 11748, 33918, 198, 11748, 28686, 198, 198, 11748, 7007, 198, 11748, 299, 32152, 355, 45941, 198, 198, 49836, 62, 16402, 1268, 4694, 796, 1351, 7, 9521, 7, 15, 11, 9698, 4008, 198, ...
2.184116
277
import requests import time from bs4 import BeautifulSoup import re def get_mag_var(lat, lon, year, month, day, elev=0): """Returns the magnetic variation at a particulat point on earth. Keyword Arguments lat -- latitude (e.g. -180.6 deg) lon -- longitude (e.g. -34.6 deg) elev -- elevation in km (default 0.0) year -- year (e.g. 2015) month -- month (e.g. 11) day -- day (e.g. 30) Returns float -- magnetic variation """ (latd, latm, lats) = decdeg2dms(lat) (lond, lonm, lons) = decdeg2dms(lon) payload = {'latd': latd,'latm':latm,'lats':lats,'lond':lond,'lonm':lonm, 'lons':lons,'elev':elev,'year':year,'month':month,'day':day,'Ein':'D'} url = 'http://www.ga.gov.au/oracle/cgi/geoAGRF.sh' # Sleep to avoid spamming server time.sleep(1) r = requests.get(url, params=payload) if r.status_code == 200: c = r.content soup = BeautifulSoup(c,'html.parser') deg_text = soup.find_all('b')[-1].text.strip() # strip out the junk so we have a number # Strip spaces before the search deg_text = deg_text.replace(" ","") deg = re.search(r'D=(.*?)deg', deg_text).group(1) deg = float(deg) return deg else: return 'something went wrong'
[ 11748, 7007, 198, 11748, 640, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198, 11748, 302, 198, 198, 4299, 651, 62, 19726, 62, 7785, 7, 15460, 11, 300, 261, 11, 614, 11, 1227, 11, 1110, 11, 7662, 28, 15, 2599, 198, 220, 220, ...
2.272408
569
"""Generated message classes for datacatalog version v1beta1. A fully managed and highly scalable data discovery and metadata management service. """ # NOTE: This file is autogenerated and should not be edited by hand. from apitools.base.protorpclite import messages as _messages from apitools.base.py import encoding package = 'datacatalog' encoding.AddCustomJsonFieldMapping( StandardQueryParameters, 'f__xgafv', '$.xgafv') encoding.AddCustomJsonEnumMapping( StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1') encoding.AddCustomJsonEnumMapping( StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
[ 37811, 8645, 515, 3275, 6097, 329, 4818, 330, 10254, 519, 2196, 410, 16, 31361, 16, 13, 198, 198, 32, 3938, 5257, 290, 4047, 43865, 1366, 9412, 290, 20150, 4542, 198, 15271, 13, 198, 37811, 198, 2, 24550, 25, 770, 2393, 318, 1960, 5...
2.888
250
# Generated by Django 2.1.7 on 2019-04-22 21:08 from django.db import migrations
[ 2, 2980, 515, 416, 37770, 362, 13, 16, 13, 22, 319, 13130, 12, 3023, 12, 1828, 2310, 25, 2919, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 628 ]
2.766667
30
# -*- coding:utf-8 -*- # ------------------------ # written by Songjian Chen # 2018-10 # ------------------------ import os import skimage.io from skimage.color import rgb2gray import skimage.transform from scipy.io import loadmat import numpy as np import cv2 import math import warnings import random import torch import matplotlib.pyplot as plt warnings.filterwarnings("ignore") extract_test_data()
[ 2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 2, 220, 22369, 198, 2, 3194, 416, 10940, 73, 666, 12555, 198, 2, 2864, 12, 940, 198, 2, 220, 22369, 198, 198, 11748, 28686, 198, 11748, 1341, 9060, 13, 952, 198, 6738, ...
3.264
125
fruit='banana' x=len(fruit) print(x)
[ 34711, 11639, 3820, 2271, 6, 201, 198, 87, 28, 11925, 7, 34711, 8, 201, 198, 4798, 7, 87, 8 ]
2
19
# Generated by Django 3.1.7 on 2021-03-05 10:21 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 13, 22, 319, 33448, 12, 3070, 12, 2713, 838, 25, 2481, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.84375
32
''' Uses [[https://github.com/fabianonline/telegram_backup#readme][telegram_backup]] database for messages data ''' from pathlib import Path from textwrap import dedent from typing import Optional, Union, TypeVar from urllib.parse import unquote # TODO mm, make it easier to rememember to use... from ..common import PathIsh, Visit, get_logger, Loc, extract_urls, from_epoch, Results, echain # TODO potentially, belongs to my. package # TODO kython? T = TypeVar("T") # TODO move to common?
[ 7061, 6, 198, 5842, 274, 16410, 5450, 1378, 12567, 13, 785, 14, 36434, 666, 25119, 14, 660, 30536, 62, 1891, 929, 2, 961, 1326, 7131, 660, 30536, 62, 1891, 929, 11907, 6831, 329, 6218, 1366, 198, 7061, 6, 198, 198, 6738, 3108, 8019,...
3.138365
159
''' Created on Dec 27, 2019 @author: duane ''' DOLLAR = ord('$') LBRACE = ord('{') RBRACE = ord('}') LPAREN = ord('(') RPAREN = ord(')') def test_istr(): check(-1, -1, "") check(-1, -1, "a") check(-1, -1, "ab") check(-1, -1, "abc") check(-1, -1, "abcd") check(-1, -1, "abcde") check(-1, -1, "abcdef") check(0, 4, "${a}") check(0, 5, "${ab}") check(0, 6, "${abc}") check(0, 7, "${abcd}") check(1, 5, "a${a}") check(2, 6, "ab${a}") check(3, 7, "abc${a}") check(4, 8, "abcd${a}") check(5, 9, "abcde${a}") check(0, 4, "${a}a") check(0, 4, "${a}ab") check(0, 4, "${a}abc") check(0, 4, "${a}abcd") check(0, 4, "${a}abcde") dut = check(4, 8, "abcd${a}xyz") dut.replace(4, 8, "X") check2(-1, -1, None, dut) r = str(dut) print("Got: %s" % r) assert ("abcdXxyz" == str(dut)) # now nested tests dut = check(5, 9, "abc${${Y}}xyz") dut.replace(5, 9, "X") r = str(dut) assert (r == "abc${X}xyz") dut = check2(3, 7, "${X}", dut) dut.replace(3, 7, "ABC") s = str(dut) r = "abcABCxyz" assert (s == r) print("Success") if __name__ == '__main__': test_istr()
[ 7061, 6, 198, 41972, 319, 4280, 2681, 11, 13130, 198, 198, 31, 9800, 25, 7043, 1531, 198, 198, 7061, 6, 198, 198, 18227, 3069, 1503, 796, 2760, 10786, 3, 11537, 198, 43, 11473, 11598, 796, 2760, 10786, 90, 11537, 198, 49, 11473, 115...
1.800892
673
#!/usr/bin/env python3 """ CLI for Accessing Deenis """ # Standard Imports import sys from pathlib import Path # Module Imports import click # Path Fixes working_dir = Path(__file__).resolve().parent sys.path.append(str(working_dir)) # Project Imports from deenis import Deenis if __name__ == "__main__": add_records()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 37811, 198, 5097, 40, 329, 8798, 278, 1024, 268, 271, 198, 37811, 198, 2, 8997, 1846, 3742, 198, 11748, 25064, 198, 6738, 3108, 8019, 1330, 10644, 198, 198, 2, 19937, 1846, 3742, ...
2.844828
116
# coding: utf-8 from bigone import BigOneDog from common import gen_logger import logging import time import json def strategy_eth_big_bnc_eth(dog): """ BIG/ETH -> BIG/BNC -> ETH/BNC ETH/BNC -> BIG/BNC -> BIG/ETH :param dog: implemention of BigOneDog :return: """ big_eth_data = dog.get_order_book('BIG-ETH') big_bnc_data = dog.get_order_book('BIG-BNC') eth_bnc_data = dog.get_order_book('ETH-BNC') print('BIG-ETH') print('', big_eth_data['asks'][0]['price'], big_eth_data['asks'][0]['amount']) print('', big_eth_data['bids'][0]['price'], big_eth_data['bids'][0]['amount']) print('BIG-BNC') print('', big_bnc_data['asks'][0]['price'], big_bnc_data['asks'][0]['amount']) print('', big_bnc_data['bids'][0]['price'], big_bnc_data['bids'][0]['amount']) print('ETH-BNC') print('', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount']) print('', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount']) # positive transaction pos_anc = 0.999*0.999*0.999*\ ((1 / (float(big_eth_data['asks'][0]['price']))) * float(big_bnc_data['bids'][0]['price']) ) pos_anc = pos_anc / float(eth_bnc_data['asks'][0]['price']) - 1 # negative transaction neg_anc = 0.999 * 0.999 * 0.999 * \ (float(eth_bnc_data['bids'][0]['price']) / float(big_bnc_data['asks'][0]['price']) * float(big_eth_data['asks'][0]['price'])) neg_anc = neg_anc / 1 - 1 flag = False amt = 2.0 if float(big_eth_data['asks'][0]['amount']) >= amt: if float(big_bnc_data['bids'][0]['amount']) >= amt: if float(eth_bnc_data['asks'][0]['amount']) >= amt * float(big_eth_data['asks'][0]['price']): flag = True msg = "[:BIG/ETH -> BIG/BNC -> ETH/BNC]:" if pos_anc < 0.01: result = "1%, 0" logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result)) else: result = "1%" if flag is False: result = "{},{}".format(result,", 0") logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result)) else: result = "{},{}".format(result," 1") logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result)) print("{} {} {} 
{}".format('BIG-ETH','BID', big_eth_data['asks'][0]['price'], str(amt))) print("{} {} {} {}".format('BIG-BNC','ASK', big_bnc_data['bids'][0]['price'], str(amt))) print("{} {} {} {}".format('ETH-BNC','BID', eth_bnc_data['asks'][0]['price'], str(amt * float(big_eth_data['asks'][0]['price'])))) # dog.create_order('BIG-ETH','ASK', big_eth_data['asks'][0]['price'], '2.0') # dog.create_order('BIG-BNC','BID', big_bnc_data['bids'][0]['price'], '2.0') # dog.create_order('ETH-BNC','ASK', eth_bnc_data['asks'][0]['price'], # str(2.0 * float(big_eth_data['asks'][0]['price']))) return True if neg_anc < 0.01: result = "1%, 0" else: result = "1%, 1" logger.info("[:ETH/BNC -> BIG/BNC -> BIG/ETH]: {0:.2f}%, {1}".format(neg_anc*100,result)) return False # return pos_anc, neg_anc if __name__ == '__main__': gen_logger('bigonetest') logger = logging.getLogger("bigone") with open("PRIVATE_KEY.json",'r') as f: private_key = json.load(f)["key"] dog = BigOneDog(private_key) # strategy_eth_bnc(dog) # dog.get_orders("ETH-BNC",'10') # r = dog.get_order("b79ef031-c477-46f9-b452-7e97aa97435d") # print(r) # r = dog.get_orders('ETH-BNC','10') # print(r) while True: flag = strategy_eth_big_bnc_eth(dog) if flag is True: break else: print("10") print("") time.sleep(10) # break # pos_anc, neg_anc = strategy_eth_bnc(dog) # if pos_anc < 0.01: # result = "1%, 0" # else: # result = "1%, 1" # # logger.info("[:BIG/ETH -> BIG/BNC -> ETH/BNC]: {0:.2f}%, {1}".format(pos_anc*100,result)) # # if neg_anc < 0.01: # result = "1%, 0" # else: # result = "1%, 1" # # logger.info("[:ETH/BNC -> BIG/BNC -> BIG/ETH]: {0:.2f}%, {1}".format(neg_anc*100,result)) # # print("10") # print("") # time.sleep(10)
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 6738, 1263, 505, 1330, 4403, 3198, 32942, 198, 6738, 2219, 1330, 2429, 62, 6404, 1362, 198, 198, 11748, 18931, 198, 11748, 640, 198, 11748, 33918, 628, 198, 4299, 4811, 62, 2788, 62, 14261, ...
1.902677
2,353
import os import yaml import logging import importlib os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' logging.getLogger('tensorflow').disabled = True from cifar_training_tools import cifar_training, cifar_error_test print('\n' + '#' * 19) print("TESTING FOR ERRORS!") print('#' * 19) stream = open('experiments.yaml', 'r') for exp in yaml.safe_load_all(stream): if 'skip_error_test' in exp and exp['skip_error_test']: continue model = getattr(importlib.import_module(exp['module']), exp['model']) cifar_error_test(model(**exp['model_parameters'])) print("OK!") print('\n' + '#' * 22) print("MODEL TRAINING BEGINS!") print('#' * 22) stream = open('experiments.yaml', 'r') for exp in yaml.safe_load_all(stream): print(); print_dict(exp); print(); model = getattr(importlib.import_module(exp['module']), exp['model']) cifar_training(model(**exp['model_parameters']), **exp['train_parameters'])
[ 11748, 28686, 198, 11748, 331, 43695, 198, 11748, 18931, 198, 11748, 1330, 8019, 198, 418, 13, 268, 2268, 17816, 10234, 62, 8697, 47, 62, 23678, 62, 25294, 62, 2538, 18697, 20520, 796, 705, 17, 6, 220, 198, 6404, 2667, 13, 1136, 11187...
2.422785
395
#!/usr/bin/python import sys import yaml import json if __name__ == '__main__': content = json.load(sys.stdin) print yaml.dump(content, indent=2, default_flow_style=False)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 198, 11748, 25064, 198, 11748, 331, 43695, 198, 11748, 33918, 198, 198, 361, 11593, 3672, 834, 6624, 705, 834, 12417, 834, 10354, 198, 220, 2695, 796, 33918, 13, 2220, 7, 17597, 13, 19282, 25...
2.712121
66
# -*- coding: utf-8 -*- import os,sys from PyQt4 import QtGui,QtCore dataRoot = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'histdata')) sys.path.append(dataRoot) import dataCenter as dataCenter from data.mongodb.DataSourceMongodb import Mongodb import datetime as dt if __name__ == '__main__': #app = QtGui.QApplication(sys.argv) #mid----------------------------------------------------------------------------------------------------------------------------- subMain() #mid----------------------------------------------------------------------------------------------------------------------------- #sys.exit(app.exec_())
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 28686, 11, 17597, 198, 6738, 9485, 48, 83, 19, 1330, 33734, 8205, 72, 11, 48, 83, 14055, 198, 198, 7890, 30016, 796, 28686, 13, 6978, 13, 397, 2777, 776, 7, 4...
3.144737
228
from joblib import Parallel, delayed from tqdm import tqdm from .processing import map, filter, split, expand, combine, join from .manipulation import windowed, flatten
[ 6738, 1693, 8019, 1330, 42945, 11, 11038, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 198, 6738, 764, 36948, 1330, 3975, 11, 8106, 11, 6626, 11, 4292, 11, 12082, 11, 4654, 198, 6738, 764, 805, 541, 1741, 1330, 4324, 276, 1...
3.5
52
P, R = input().split() if P == '0': print('C') elif R == '0': print('B') else: print('A')
[ 47, 11, 371, 796, 5128, 22446, 35312, 3419, 198, 198, 361, 350, 6624, 705, 15, 10354, 3601, 10786, 34, 11537, 198, 417, 361, 371, 6624, 705, 15, 10354, 3601, 10786, 33, 11537, 198, 17772, 25, 3601, 10786, 32, 11537 ]
2.307692
39
import pymongo import yaml import sched import time import json from castella import TweetCrawler if __name__ == "__main__": searcher = Castella() searcher.execute_search()
[ 11748, 279, 4948, 25162, 198, 11748, 331, 43695, 198, 11748, 6038, 198, 11748, 640, 198, 11748, 33918, 198, 6738, 3350, 12627, 1330, 18752, 34, 39464, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220...
3.137931
58
# # slice paddle model generator # import numpy as np from save_model import saveModel import paddle as pdpd import sys data_type = 'float32' if __name__ == "__main__": main()
[ 2, 198, 2, 16416, 39517, 2746, 17301, 198, 2, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 3613, 62, 19849, 1330, 3613, 17633, 198, 11748, 39517, 355, 279, 26059, 67, 198, 11748, 25064, 198, 198, 7890, 62, 4906, 796, 705, 22468, 262...
2.967213
61
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from tacker.sol_refactored.common import exceptions as sol_ex from tacker.sol_refactored import objects LOG = logging.getLogger(__name__) # not used at the moment # see IETF RFC 7396
[ 2, 15069, 357, 34, 8, 33448, 399, 3974, 261, 21821, 290, 44735, 10501, 198, 2, 1439, 6923, 33876, 13, 198, 2, 198, 2, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 345, 743, 19...
3.382576
264
# compatibility module for different python versions import sys if sys.version_info[:2] > (3, 0): PY2 = False PY3 = True Bytes = bytes Unicode = str basestring = str class_type_name = 'class' ClassType = type exception_module = 'builtins' new_class = type self_name = '__self__' from io import StringIO xrange = range else: PY2 = True PY3 = False Bytes = str Unicode = unicode basestring = basestring class_type_name = 'type' from types import ClassType exception_module = 'exceptions' from new import classobj as new_class self_name = 'im_self' from cStringIO import StringIO xrange = xrange
[ 2, 17764, 8265, 329, 1180, 21015, 6300, 198, 11748, 25064, 198, 198, 361, 25064, 13, 9641, 62, 10951, 58, 25, 17, 60, 1875, 357, 18, 11, 657, 2599, 628, 220, 220, 220, 350, 56, 17, 796, 10352, 198, 220, 220, 220, 350, 56, 18, 79...
2.429054
296
import re import unittest2 from google.appengine.ext import ndb from google.appengine.ext import testbed from consts.notification_type import NotificationType from helpers.event.event_test_creator import EventTestCreator from models.team import Team from models.notifications.match_score import MatchScoreNotification
[ 11748, 302, 198, 11748, 555, 715, 395, 17, 198, 198, 6738, 23645, 13, 1324, 18392, 13, 2302, 1330, 299, 9945, 198, 6738, 23645, 13, 1324, 18392, 13, 2302, 1330, 1332, 3077, 198, 198, 6738, 1500, 82, 13, 1662, 2649, 62, 4906, 1330, 4...
3.833333
84
md_template_d144 = """verbosity=0 xcFunctional=PBE FDtype=4th [Mesh] nx=160 ny=80 nz=80 [Domain] ox=0. oy=0. oz=0. lx=42.4813 ly=21.2406 lz=21.2406 [Potentials] pseudopotential=pseudo.D_tm_pbe [Poisson] solver=@ max_steps_initial=@50 max_steps=@50 reset=@ bcx=periodic bcy=periodic bcz=periodic [Run] type=MD [MD] type=@ num_steps=@ dt=@15. [XLBOMD] dissipation=@5 align=@ [Quench] max_steps=@5 max_steps_tight=@ atol=1.e-@10 num_lin_iterations=3 ortho_freq=100 [SpreadPenalty] type=@energy damping=@ target=@1.75 alpha=@0.01 [Orbitals] initial_type=Gaussian initial_width=1.5 overallocate_factor=@2. [ProjectedMatrices] solver=@short_sighted [LocalizationRegions] radius=@8. auxiliary_radius=@ move_tol=@0.1 [Restart] input_filename=wave.out input_level=3 interval=@ """ md_template_H2O_64 = """verbosity=1 xcFunctional=PBE FDtype=4th [Mesh] nx=128 ny=128 nz=128 [Domain] ox=0. oy=0. oz=0. lx=23.4884 ly=23.4884 lz=23.4884 [Potentials] pseudopotential=pseudo.O_ONCV_PBE_SG15 pseudopotential=pseudo.D_ONCV_PBE_SG15 [Poisson] solver=@ max_steps=@ [Run] type=MD [Quench] max_steps=1000 atol=1.e-@ [MD] type=@ num_steps=@ dt=10. print_interval=5 [XLBOMD] dissipation=@ align=@ [Restart] input_filename=wave.out input_level=4 output_level=4 interval=@ """ quench_template_H2O_64 = """verbosity=1 xcFunctional=PBE FDtype=4th [Mesh] nx=128 ny=128 nz=128 [Domain] ox=0. oy=0. oz=0. lx=23.4884 ly=23.4884 lz=23.4884 [Potentials] pseudopotential=pseudo.O_ONCV_PBE_SG15 pseudopotential=pseudo.D_ONCV_PBE_SG15 [Run] type=QUENCH [Quench] max_steps=1000 atol=1.e-8 [Orbitals] initial_type=Fourier [Restart] output_level=4 """ quench_template_d144 = """verbosity=1 xcFunctional=PBE FDtype=4th [Mesh] nx=160 ny=80 nz=80 [Domain] ox=0. oy=0. oz=0. 
lx=42.4813 ly=21.2406 lz=21.2406 [Potentials] pseudopotential=pseudo.D_tm_pbe [Poisson] solver=@ max_steps_initial=@50 max_steps=@50 bcx=periodic bcy=periodic bcz=periodic [Run] type=QUENCH [Quench] max_steps=200 atol=1.e-7 num_lin_iterations=3 ortho_freq=100 [SpreadPenalty] type=@energy damping=@ target=@1.75 alpha=@0.01 [Orbitals] initial_type=Gaussian initial_width=1.5 [ProjectedMatrices] solver=@short_sighted [LocalizationRegions] radius=@8. [Restart] output_type=distributed """ H2O_64_params={ 'nodes': '32', 'ntasks': '256', 'omp_num_threads': 8 if omp_num_threads == 4 else omp_num_threads, 'cores_per_task': '2', 'potentials': 'ln -s $maindir/potentials/pseudo.O_ONCV_PBE_SG15\nln -s $maindir/potentials/pseudo.D_ONCV_PBE_SG15', 'lrs': '', 'jobname': 'H2O_64', } d144_params={ 'nodes': '8', 'walltime': '01:30:00', 'ntasks': '125', 'omp_num_threads': omp_num_threads, 'cores_per_task': '1', 'potentials': 'ln -s $maindir/potentials/pseudo.D_tm_pbe', 'lrs': '-l lrs.in', 'jobname': 'd144', } vulcan_params={ 'queue': 'psmall', 'scratch_path': '/p/lscratchv/mgmolu/dunn27/mgmol/', 'gres': 'lscratchv', 'exe': 'mgmol-bgq', } cab_params={ 'queue': 'pbatch', 'scratch_path': '/p/lscratchd/dunn27/mgmol/', 'gres': 'lscratchd', 'omp_num_threads': '1', 'exe': 'mgmol-pel', 'walltime': '01:30:00', } runfile_quench_template="""#!/bin/tcsh #MSUB -l nodes={nodes},walltime={walltime} #MSUB -o mgmol.out #MSUB -q {queue} #MSUB -A comp #MSUB -l gres={gres} #MSUB -N {jobname} rm -f queued echo ' ' > running use boost-nompi-1.55.0 export BOOST_ROOT=/usr/local/tools/boost-nompi-1.55.0 export Boost_NO_SYSTEM_PATHS=ON setenv OMP_NUM_THREADS {omp_num_threads} set ntasks = {ntasks} set maindir = $home/mgmol set exe = $maindir/bin/{exe} set datadir = `pwd` set scratchdir = {scratch_path}`basename $datadir` mkdir $scratchdir cd $scratchdir echo ' ' > running set cfg_quench = mgmol_quench.cfg cp $datadir/$cfg_quench . cp $datadir/coords.in . cp $datadir/lrs.in . 
{potentials} #1st run srun -n $ntasks -c {cores_per_task} $exe -c $cfg_quench -i coords.in {lrs} #restart rm -f wave.out set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1` ln -s -f $restart_file wave.out rm -f running echo ' ' > queued """ runfile_md_template="""#!/bin/tcsh #MSUB -l nodes={nodes},walltime={walltime} #MSUB -o mgmol.out #MSUB -q {queue} #MSUB -A comp #MSUB -l gres={gres} #MSUB -N {jobname} rm -f queued echo ' ' > running use boost-nompi-1.55.0 export BOOST_ROOT=/usr/local/tools/boost-nompi-1.55.0 export Boost_NO_SYSTEM_PATHS=ON setenv OMP_NUM_THREADS {omp_num_threads} set ntasks = {ntasks} set maindir = $home/mgmol set exe = $maindir/bin/{exe} set datadir = `pwd` set scratchdir = {scratch_path}`basename $datadir` mkdir $scratchdir cd $scratchdir echo ' ' > running set cfg_md = mgmol_md.cfg cp $datadir/$cfg_md . #restart rm -f wave.out set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1` ln -s -f $restart_file wave.out #MD run srun -n $ntasks -c {cores_per_task} $exe -c $cfg_md #restart rm -f wave.out set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1` ln -s -f $restart_file wave.out rm -f running echo ' ' > queued """
[ 9132, 62, 28243, 62, 67, 18444, 796, 37227, 19011, 16579, 28, 15, 198, 25306, 22203, 282, 28, 47, 12473, 198, 26009, 4906, 28, 19, 400, 198, 58, 37031, 60, 198, 77, 87, 28, 14198, 198, 3281, 28, 1795, 198, 27305, 28, 1795, 198, 58...
1.917701
2,819
#! /usr/bin/env python import copy from copy import deepcopy import rospy import threading import quaternion import numpy as np from geometry_msgs.msg import Point from visualization_msgs.msg import * from franka_interface import ArmInterface from panda_robot import PandaArm import matplotlib.pyplot as plt from scipy.spatial.transform import Rotation np.set_printoptions(precision=2) """ This is a FORCE-BASED VARIABLE IMPEDANCE CONTROLLER based on [Huang1992: Compliant Motion Control of Robots by Using Variable Impedance] To achieve force tracking, the apparent stiffness (K) and damping (B) is dynamically adjusted through functions dependent on the error in position, velocity and force About the code/controller: 1] Only stiffness and damping in the 'z'-direction is adaptive, the rest are static 2] Due to the faulted joint velocities (read from rostopics), the more noisy, numerically derived derivatives of the joint position are prefered to be used in the controller { get_x_dot(..., numerically = True) } 3] You can now choose between perform_torque_Huang1992() and perform_torque_DeSchutter() - DeSchutter's control-law offers geometrically consitent stiffness and is more computationally expensive 4] The default desired motion- and force-trajectories are now made in a time-consistent matter, so that the PUBLISH RATE can be altered without messing up the desired behaviour. 
The number of iterations is calculated as a function of the controller's control-cycle, T: (max_num_it = duration(=15 s) / T) """ # --------- Constants ----------------------------- #print(robot.joint_ordered_angles()) #Read the robot's joint-angles #new_start = {'panda_joint1': 1.938963389436404, 'panda_joint2': 0.6757504724282993, 'panda_joint3': -0.43399745125475564, 'panda_joint4': -2.0375275954865573, 'panda_joint5': -0.05233040021194351, 'panda_joint6': 3.133254153457202, 'panda_joint7': 1.283328743909796} # Stiffness Kp = 30 Kpz = 30 #initial value (adaptive) Ko = 900 K = np.array([[Kp, 0, 0, 0, 0, 0], [0, Kp, 0, 0, 0, 0], [0, 0, Kpz, 0, 0, 0], [0, 0, 0, Ko, 0, 0], [0, 0, 0, 0, Ko, 0], [0, 0, 0, 0, 0, Ko]]) # Damping Bp = Kp/7 Bpz = Bp # #initial value (adaptive) Bo = 50 B = np.array([[Bp, 0, 0, 0, 0, 0], [0, Bp, 0, 0, 0, 0], [0, 0, Bpz, 0, 0, 0], [0, 0, 0, Bo, 0, 0], [0, 0, 0, 0, Bo, 0], [0, 0, 0, 0, 0, Bo]]) # Apparent inertia Mp = 10 Mo = 10 M_diag = np.array([Mp,Mp,Mp,Mo,Mo,Mo]) M = np.diagflat(M_diag) # Constant matrices appearing in equation (50) of [Huang1992] K_v = np.identity(6) P = np.identity(6) gamma = np.identity(18) #gamma_M = 12 gamma_B = 0.001 #2 # The damping's rate of adaptivity (high value = slow changes) gamma_K = 0.0005 #1 # The stiffness' rate of adaptivity (high value = slow changes) #gamma[2,2] = gamma_M gamma[8,8] = gamma_B gamma[14,14] = gamma_K duration = 15 #seconds SHOULD NOT BE ALTERED """Functions for generating desired MOTION trajectories""" #1 Generate a desired trajectory for the manipulator to follow #2 Generate a desired trajectory for the manipulator to follow #3 Generate a (time-consistent) desired motion trajectory """Functions for generating desired FORCE trajectories""" #1 Generate a desired force trajectory #2 Generate an efficient desired force trajectory #3 Generate a (time-consistent) desired force trajectory # ------------ Helper functions -------------------------------- # Calculate the numerical derivative of a 
each row in a vector # Saturation-function # Return the cartesian (task-space) inertia of the manipulator [alternatively the inverse of it] # Return the external forces (everything except for z-force is set to 0 due to offsets) # Return the position and (relative) orientation # Return the linear and angular velocities # Numerically = True -> return the derivarive of the state-vector # Numerically = False -> read values from rostopic (faulty in sim when interacting with the environment) # Return the error in position and orientation # Return the error in linear and angular velocities # Return the error in linear and angular acceleration # Return the cartesian (task-space) position # Compute difference between quaternions and return Euler angle in radians as difference # -------------- Main functions -------------------- # Get xi as it is described in equation (44) in [Huang1992] # Calculate lambda_dot as in equation (50) in [Huang1992] # Return the updated (adapted) Inertia, Damping and Stiffness matrices. 
# Calculate and perform the torque as in equation (10) in [Huang1992] """ TESTING AREA (Functions needed to run an adaptive version of DeSchutter's impedance controller) [with geometrically consistent stiffness] """ """ TESTING AREA """ # -------------- Plotting ------------------------ if __name__ == "__main__": # ---------- Initialization ------------------- rospy.init_node("impedance_control") robot = PandaArm() publish_rate = 250 rate = rospy.Rate(publish_rate) T = 0.001*(1000/publish_rate) max_num_it = int(duration /T) #robot.move_to_joint_positions(new_start) robot.move_to_neutral() # List used to contain data needed for calculation of the torque output lam = np.zeros(18) v_history = np.zeros((6,max_num_it)) # Lists providing data for plotting p_history = np.zeros((3,max_num_it)) v_history_num = np.zeros((6,max_num_it)) x_history = np.zeros((6,max_num_it)) delta_x_history = np.zeros((6,max_num_it)) F_ext_history = np.zeros((6,max_num_it)) z_dynamics_history = np.zeros((3,max_num_it)) # Specify the desired behaviour of the robot x_d_ddot, x_d_dot, p_d = generate_desired_trajectory_tc(max_num_it,T,move_in_x = True) goal_ori = np.asarray(robot.endpoint_pose()['orientation']) # goal orientation = current (initial) orientation [remains the same the entire duration of the run] Rot_d = robot.endpoint_pose()['orientation_R'] # used by the DeSchutter implementation F_d = generate_F_d_tc(max_num_it,T) # ----------- The control loop ----------- for i in range(max_num_it): # update state-lists p_history[:,i] = get_p() x_history[:,i] = get_x(goal_ori) delta_x_history[:,i] = get_delta_x(goal_ori,p_d[:,i]) F_ext_history[:,i] = get_F_ext() x_dot = get_x_dot(x_history,i,T, numerically=False) #chose 'numerically' either 'True' or 'False' v_history_num[:,i] = get_x_dot(x_history,i,T, numerically=True) # only for plotting v_history[:,i] = get_x_dot(x_history,i,T) # for calculating error in acceleration # adapt M,B and K xi = get_xi(goal_ori, p_d[:,i],x_dot, x_d_dot[:,i], 
x_d_ddot[:,i], v_history, i, T) lam = lam.reshape([18,1]) + get_lambda_dot(gamma,xi,K_v,P,F_d[:,i]).reshape([18,1])*T M_hat,B_hat,K_hat = update_MBK_hat(lam,M,B,K) # Apply the resulting torque to the robot """CHOOSE ONE OF THE TWO CONTROLLERS BELOW""" perform_torque_Huang1992(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], goal_ori) #perform_torque_DeSchutter(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], Rot_d) rate.sleep() # plotting and printing z_dynamics_history[0][i]=M_hat[2][2] z_dynamics_history[1][i]=B_hat[2][2] z_dynamics_history[2][i]=K_hat[2][2] # Live printing to screen when the controller is running if i%100 == 0: print(i,'/',max_num_it,' = ',T*i,' [s] ) Force in z: ',F_ext_history[2,i]) print(K_hat[2][2]) print('') #Uncomment the block below to save plotting-data """ np.save('VIC_p_d.npy',p_d) np.save('VIC_p.npy',p_history) np.save('VIC_Fz_d.npy',F_d) np.save('VIC_Fz.npy',F_ext_history[2]) np.save('VIC_delta_x.npy',delta_x_history) #orientation error in radians np.save('VIC_adaptive_gains.npy',z_dynamics_history) """ plot_result(v_history_num,v_history, p_history, p_d, delta_x_history, F_ext_history, F_d, z_dynamics_history,M,B,K, T)
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 4866, 198, 6738, 4866, 1330, 2769, 30073, 198, 11748, 686, 2777, 88, 198, 11748, 4704, 278, 198, 11748, 627, 9205, 295, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 22939, 6...
2.516687
3,296
# Generated by Django 2.2.9 on 2020-01-28 14:50 import django.utils.timezone from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 362, 13, 17, 13, 24, 319, 12131, 12, 486, 12, 2078, 1478, 25, 1120, 198, 198, 11748, 42625, 14208, 13, 26791, 13, 2435, 11340, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.926829
41
""" Author: Mohammad Dehghani Ashkezari <mdehghan@uw.edu> Date: 2019-06-28 Function: Host a collection of shared multi-purpose helper functions. """ import os import sys from tqdm import tqdm from colorama import Fore, Back, Style, init import numpy as np import pandas as pd import webbrowser import IPython MAX_ROWS = 2000000 MAX_SAMPLE_SOURCE = 500000 def halt(msg): """Prints an error message and terminates the program.""" msg = '\n' + msg init(convert=True) print(Fore.RED + msg, file=sys.stderr) print(Style.RESET_ALL, end='') sys.exit(1) return def print_tqdm(msg, err=False): """Print helper function compatible with tqdmm progressbar.""" # init() msg = '\n' + msg if err: tqdm.write(Fore.RED + msg) else: tqdm.write(msg) tqdm.write(Style.RESET_ALL, end='') return def get_base_url(): """Returns API root endpoint.""" return os.environ.get( 'CMAP_API_BASE_URL', 'https://simonscmap.com').rstrip('/') def jupytered(): """Returns True if jupyter notebook has invoked the package.""" jup = False import __main__ as main if not hasattr(main, '__file__'): jup = True return jup def inline(): """ Checks if the package results should get prepared for an "inline" context. Currently, just calls the jupytered function. 
""" return jupytered() def make_filename_by_table_var(table, variable, prefix=''): """Generate a filename (without extention) using table and variable names.""" if prefix != '': prefix += '_' return prefix + variable + '_' + table def canvas_rect(dw, dh): """Resizes a canvas dimensions so that it better fits on client browser.""" ar = dw / dh h = 400 if ar > 3 else 500 w_min = 300 w_max = 1000 w = int(ar * h) if w > w_max: w = w_max if w < w_min: w = w_min return w, h def get_data_limits(data, quant=0.05): """Returns low and high quantile limits of a numeric array.""" data = np.array(data).flatten() return np.nanquantile(data, quant), np.nanquantile(data, 1-quant) # def get_token(token=None): # token = token or os.environ.get('CMAP_API_KEY') # if token in [None, '']: # halt('API Key must be specified to access CMAP API') # return token def config_path(): """Returns the path to the config spreadsheet file.""" return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.csv') def initiate_config_file(token, vizEngine, exportDir, exportFormat, figureDir): """Creates a .csv file hosting the primary project configs """ if vizEngine is None: vizEngine = 'plotly' if exportDir is None: exportDir = './export/' if exportFormat is None: exportFormat = '.csv' if figureDir is None: figureDir = './figure/' config = { 'token': [token], 'vizEngine': [vizEngine], 'exportDir': [exportDir], 'exportFormat': [exportFormat], 'figureDir': [figureDir] } pd.DataFrame(config).to_csv(config_path(), index=False) return def remove_angle_brackets(token): """Removes angle brackets at start and end of the token, if exist.""" if token is not None: if token[0] == '<': token = token[1:] if token[-1] == '>': token = token[:-1] return token def save_config(token=None, vizEngine=None, exportDir=None, exportFormat=None, figureDir=None): """Updates the project's configs at the config spreadsheet.""" configPath = config_path() if not os.path.isfile(configPath): initiate_config_file(token, 
vizEngine, exportDir, exportFormat, figureDir) df = pd.read_csv(configPath) if token is not None: df['token'] = remove_angle_brackets(token) if vizEngine is not None: supportedVizEngines = ['bokeh', 'plotly'] if vizEngine not in supportedVizEngines: halt('%s is not a supported visualization library' % vizEngine) df['vizEngine'] = vizEngine if exportDir is not None: df['exportDir'] = exportDir if exportFormat is not None: df['exportFormat'] = exportFormat if figureDir is not None: df['figureDir'] = figureDir df.to_csv(configPath, index=False) return def load_config(): """Loads the config spreadsheet and returns it as a dataframe.""" configPath = config_path() if not os.path.isfile(configPath): msg = '\nAPI key not found!\n' msg = msg + 'Please pass the API key using the following code:\n' msg = msg + 'import pycmap\n' msg = msg + 'pycmap.API(<api_key>)\n' halt(msg) return pd.read_csv(configPath) def get_token(): """Returns the API key.""" return remove_angle_brackets(load_config()['token'][0]) def get_vizEngine(): """Returns the visualization library name.""" return load_config()['vizEngine'][0] def get_export_dir(): """Returns the path to the export directory.""" return load_config()['exportDir'][0] def get_export_format(): """Returns the file format of the exported files.""" return load_config()['exportFormat'][0] def get_figure_dir(): """Returns the path to the figure directory.""" return load_config()['figureDir'][0] def get_bokeh_tools(): """Returns a list tools used along with a bokeh graph.""" return 'crosshair,pan,zoom_in,wheel_zoom,zoom_out,box_zoom,reset,save' def normalize(vals, min_max=False): """Takes an array and either normalize to min/max, standardize it (remove the mean and divide by standard deviation).""" if min_max: normalized_vals=(vals-np.nanmin(vals))/(np.nanmax(vals)-np.nanmin(vals)) else: normalized_vals=(vals-np.nanmean(vals))/np.nanstd(vals) return normalized_vals def open_HTML(path): """Display HTML file by defaut browser or inline in 
case jupyter is the caller.""" if jupytered(): vObj = IPython.display.IFrame(path, width=800, height=400) IPython.display.display(vObj) else: path = 'file://' + os.path.realpath(path) webbrowser.open(path, new=2) return
[ 37811, 201, 198, 13838, 25, 29674, 1024, 71, 456, 3216, 7844, 365, 89, 2743, 1279, 76, 2934, 71, 6064, 31, 84, 86, 13, 15532, 29, 201, 198, 201, 198, 10430, 25, 13130, 12, 3312, 12, 2078, 201, 198, 201, 198, 22203, 25, 14504, 257,...
2.146626
3,260
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: api/v3/api_proto/projects.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from api.v3.api_proto import project_objects_pb2 as api_dot_v3_dot_api__proto_dot_project__objects__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='api/v3/api_proto/projects.proto', package='monorail.v3', syntax='proto3', serialized_options=b'Z!infra/monorailv2/api/v3/api_proto', create_key=_descriptor._internal_create_key, serialized_pb=b'\n\x1f\x61pi/v3/api_proto/projects.proto\x12\x0bmonorail.v3\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a&api/v3/api_proto/project_objects.proto\"t\n\x15\x43reateFieldDefRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xe0\x41\x02\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\x12,\n\x08\x66ielddef\x18\x02 \x01(\x0b\x32\x15.monorail.v3.FieldDefB\x03\xe0\x41\x02\"J\n\x16GetComponentDefRequest\x12\x30\n\x04name\x18\x01 \x01(\tB\"\xfa\x41\x1c\n\x1a\x61pi.crbug.com/ComponentDef\xe0\x41\x02\"\x81\x01\n\x19\x43reateComponentDefRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xe0\x41\x02\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\x12\x35\n\rcomponent_def\x18\x02 \x01(\x0b\x32\x19.monorail.v3.ComponentDefB\x03\xe0\x41\x02\"M\n\x19\x44\x65leteComponentDefRequest\x12\x30\n\x04name\x18\x01 
\x01(\tB\"\xe0\x41\x02\xfa\x41\x1c\n\x1a\x61pi.crbug.com/ComponentDef\"q\n\x19ListIssueTemplatesRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\xe0\x41\x02\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"d\n\x1aListIssueTemplatesResponse\x12-\n\ttemplates\x18\x01 \x03(\x0b\x32\x1a.monorail.v3.IssueTemplate\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"p\n\x18ListComponentDefsRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\xe0\x41\x02\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"g\n\x19ListComponentDefsResponse\x12\x31\n\x0e\x63omponent_defs\x18\x01 \x03(\x0b\x32\x19.monorail.v3.ComponentDef\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"<\n\x13ListProjectsRequest\x12\x11\n\tpage_size\x18\x01 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t\"W\n\x14ListProjectsResponse\x12&\n\x08projects\x18\x01 \x03(\x0b\x32\x14.monorail.v3.Project\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\x87\x05\n\x08Projects\x12M\n\x0e\x43reateFieldDef\x12\".monorail.v3.CreateFieldDefRequest\x1a\x15.monorail.v3.FieldDef\"\x00\x12S\n\x0fGetComponentDef\x12#.monorail.v3.GetComponentDefRequest\x1a\x19.monorail.v3.ComponentDef\"\x00\x12Y\n\x12\x43reateComponentDef\x12&.monorail.v3.CreateComponentDefRequest\x1a\x19.monorail.v3.ComponentDef\"\x00\x12V\n\x12\x44\x65leteComponentDef\x12&.monorail.v3.DeleteComponentDefRequest\x1a\x16.google.protobuf.Empty\"\x00\x12g\n\x12ListIssueTemplates\x12&.monorail.v3.ListIssueTemplatesRequest\x1a\'.monorail.v3.ListIssueTemplatesResponse\"\x00\x12\x64\n\x11ListComponentDefs\x12%.monorail.v3.ListComponentDefsRequest\x1a&.monorail.v3.ListComponentDefsResponse\"\x00\x12U\n\x0cListProjects\x12 .monorail.v3.ListProjectsRequest\x1a!.monorail.v3.ListProjectsResponse\"\x00\x42#Z!infra/monorailv2/api/v3/api_protob\x06proto3' , 
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,api_dot_v3_dot_api__proto_dot_project__objects__pb2.DESCRIPTOR,]) _CREATEFIELDDEFREQUEST = _descriptor.Descriptor( name='CreateFieldDefRequest', full_name='monorail.v3.CreateFieldDefRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='parent', full_name='monorail.v3.CreateFieldDefRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\340A\002\372A\027\n\025api.crbug.com/Project', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='fielddef', full_name='monorail.v3.CreateFieldDefRequest.fielddef', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=177, serialized_end=293, ) _GETCOMPONENTDEFREQUEST = _descriptor.Descriptor( name='GetComponentDefRequest', full_name='monorail.v3.GetComponentDefRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='monorail.v3.GetComponentDefRequest.name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, serialized_options=b'\372A\034\n\032api.crbug.com/ComponentDef\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=295, serialized_end=369, ) _CREATECOMPONENTDEFREQUEST = _descriptor.Descriptor( name='CreateComponentDefRequest', full_name='monorail.v3.CreateComponentDefRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='parent', full_name='monorail.v3.CreateComponentDefRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\340A\002\372A\027\n\025api.crbug.com/Project', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='component_def', full_name='monorail.v3.CreateComponentDefRequest.component_def', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=372, serialized_end=501, ) _DELETECOMPONENTDEFREQUEST = _descriptor.Descriptor( name='DeleteComponentDefRequest', full_name='monorail.v3.DeleteComponentDefRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='name', full_name='monorail.v3.DeleteComponentDefRequest.name', index=0, number=1, 
type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\340A\002\372A\034\n\032api.crbug.com/ComponentDef', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=503, serialized_end=580, ) _LISTISSUETEMPLATESREQUEST = _descriptor.Descriptor( name='ListIssueTemplatesRequest', full_name='monorail.v3.ListIssueTemplatesRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='parent', full_name='monorail.v3.ListIssueTemplatesRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=b'\372A\027\n\025api.crbug.com/Project\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='page_size', full_name='monorail.v3.ListIssueTemplatesRequest.page_size', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='page_token', full_name='monorail.v3.ListIssueTemplatesRequest.page_token', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], 
extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=582, serialized_end=695, ) _LISTISSUETEMPLATESRESPONSE = _descriptor.Descriptor( name='ListIssueTemplatesResponse', full_name='monorail.v3.ListIssueTemplatesResponse', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='templates', full_name='monorail.v3.ListIssueTemplatesResponse.templates', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='next_page_token', full_name='monorail.v3.ListIssueTemplatesResponse.next_page_token', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=697, serialized_end=797, ) _LISTCOMPONENTDEFSREQUEST = _descriptor.Descriptor( name='ListComponentDefsRequest', full_name='monorail.v3.ListComponentDefsRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='parent', full_name='monorail.v3.ListComponentDefsRequest.parent', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=b'\372A\027\n\025api.crbug.com/Project\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='page_size', full_name='monorail.v3.ListComponentDefsRequest.page_size', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='page_token', full_name='monorail.v3.ListComponentDefsRequest.page_token', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=799, serialized_end=911, ) _LISTCOMPONENTDEFSRESPONSE = _descriptor.Descriptor( name='ListComponentDefsResponse', full_name='monorail.v3.ListComponentDefsResponse', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='component_defs', full_name='monorail.v3.ListComponentDefsResponse.component_defs', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='next_page_token', full_name='monorail.v3.ListComponentDefsResponse.next_page_token', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=913, serialized_end=1016, ) _LISTPROJECTSREQUEST = _descriptor.Descriptor( name='ListProjectsRequest', full_name='monorail.v3.ListProjectsRequest', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='page_size', full_name='monorail.v3.ListProjectsRequest.page_size', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='page_token', full_name='monorail.v3.ListProjectsRequest.page_token', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1018, serialized_end=1078, ) _LISTPROJECTSRESPONSE = _descriptor.Descriptor( name='ListProjectsResponse', full_name='monorail.v3.ListProjectsResponse', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name='projects', full_name='monorail.v3.ListProjectsResponse.projects', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, 
default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='next_page_token', full_name='monorail.v3.ListProjectsResponse.next_page_token', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1080, serialized_end=1167, ) _CREATEFIELDDEFREQUEST.fields_by_name['fielddef'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._FIELDDEF _CREATECOMPONENTDEFREQUEST.fields_by_name['component_def'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF _LISTISSUETEMPLATESRESPONSE.fields_by_name['templates'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._ISSUETEMPLATE _LISTCOMPONENTDEFSRESPONSE.fields_by_name['component_defs'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF _LISTPROJECTSRESPONSE.fields_by_name['projects'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._PROJECT DESCRIPTOR.message_types_by_name['CreateFieldDefRequest'] = _CREATEFIELDDEFREQUEST DESCRIPTOR.message_types_by_name['GetComponentDefRequest'] = _GETCOMPONENTDEFREQUEST DESCRIPTOR.message_types_by_name['CreateComponentDefRequest'] = _CREATECOMPONENTDEFREQUEST DESCRIPTOR.message_types_by_name['DeleteComponentDefRequest'] = _DELETECOMPONENTDEFREQUEST DESCRIPTOR.message_types_by_name['ListIssueTemplatesRequest'] = _LISTISSUETEMPLATESREQUEST DESCRIPTOR.message_types_by_name['ListIssueTemplatesResponse'] = 
_LISTISSUETEMPLATESRESPONSE DESCRIPTOR.message_types_by_name['ListComponentDefsRequest'] = _LISTCOMPONENTDEFSREQUEST DESCRIPTOR.message_types_by_name['ListComponentDefsResponse'] = _LISTCOMPONENTDEFSRESPONSE DESCRIPTOR.message_types_by_name['ListProjectsRequest'] = _LISTPROJECTSREQUEST DESCRIPTOR.message_types_by_name['ListProjectsResponse'] = _LISTPROJECTSRESPONSE _sym_db.RegisterFileDescriptor(DESCRIPTOR) CreateFieldDefRequest = _reflection.GeneratedProtocolMessageType('CreateFieldDefRequest', (_message.Message,), { 'DESCRIPTOR' : _CREATEFIELDDEFREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.CreateFieldDefRequest) }) _sym_db.RegisterMessage(CreateFieldDefRequest) GetComponentDefRequest = _reflection.GeneratedProtocolMessageType('GetComponentDefRequest', (_message.Message,), { 'DESCRIPTOR' : _GETCOMPONENTDEFREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.GetComponentDefRequest) }) _sym_db.RegisterMessage(GetComponentDefRequest) CreateComponentDefRequest = _reflection.GeneratedProtocolMessageType('CreateComponentDefRequest', (_message.Message,), { 'DESCRIPTOR' : _CREATECOMPONENTDEFREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.CreateComponentDefRequest) }) _sym_db.RegisterMessage(CreateComponentDefRequest) DeleteComponentDefRequest = _reflection.GeneratedProtocolMessageType('DeleteComponentDefRequest', (_message.Message,), { 'DESCRIPTOR' : _DELETECOMPONENTDEFREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.DeleteComponentDefRequest) }) _sym_db.RegisterMessage(DeleteComponentDefRequest) ListIssueTemplatesRequest = _reflection.GeneratedProtocolMessageType('ListIssueTemplatesRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTISSUETEMPLATESREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # 
@@protoc_insertion_point(class_scope:monorail.v3.ListIssueTemplatesRequest) }) _sym_db.RegisterMessage(ListIssueTemplatesRequest) ListIssueTemplatesResponse = _reflection.GeneratedProtocolMessageType('ListIssueTemplatesResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTISSUETEMPLATESRESPONSE, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListIssueTemplatesResponse) }) _sym_db.RegisterMessage(ListIssueTemplatesResponse) ListComponentDefsRequest = _reflection.GeneratedProtocolMessageType('ListComponentDefsRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTCOMPONENTDEFSREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListComponentDefsRequest) }) _sym_db.RegisterMessage(ListComponentDefsRequest) ListComponentDefsResponse = _reflection.GeneratedProtocolMessageType('ListComponentDefsResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTCOMPONENTDEFSRESPONSE, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListComponentDefsResponse) }) _sym_db.RegisterMessage(ListComponentDefsResponse) ListProjectsRequest = _reflection.GeneratedProtocolMessageType('ListProjectsRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTPROJECTSREQUEST, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListProjectsRequest) }) _sym_db.RegisterMessage(ListProjectsRequest) ListProjectsResponse = _reflection.GeneratedProtocolMessageType('ListProjectsResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTPROJECTSRESPONSE, '__module__' : 'api.v3.api_proto.projects_pb2' # @@protoc_insertion_point(class_scope:monorail.v3.ListProjectsResponse) }) _sym_db.RegisterMessage(ListProjectsResponse) DESCRIPTOR._options = None _CREATEFIELDDEFREQUEST.fields_by_name['parent']._options = None _CREATEFIELDDEFREQUEST.fields_by_name['fielddef']._options = None 
_GETCOMPONENTDEFREQUEST.fields_by_name['name']._options = None _CREATECOMPONENTDEFREQUEST.fields_by_name['parent']._options = None _CREATECOMPONENTDEFREQUEST.fields_by_name['component_def']._options = None _DELETECOMPONENTDEFREQUEST.fields_by_name['name']._options = None _LISTISSUETEMPLATESREQUEST.fields_by_name['parent']._options = None _LISTCOMPONENTDEFSREQUEST.fields_by_name['parent']._options = None _PROJECTS = _descriptor.ServiceDescriptor( name='Projects', full_name='monorail.v3.Projects', file=DESCRIPTOR, index=0, serialized_options=None, create_key=_descriptor._internal_create_key, serialized_start=1170, serialized_end=1817, methods=[ _descriptor.MethodDescriptor( name='CreateFieldDef', full_name='monorail.v3.Projects.CreateFieldDef', index=0, containing_service=None, input_type=_CREATEFIELDDEFREQUEST, output_type=api_dot_v3_dot_api__proto_dot_project__objects__pb2._FIELDDEF, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='GetComponentDef', full_name='monorail.v3.Projects.GetComponentDef', index=1, containing_service=None, input_type=_GETCOMPONENTDEFREQUEST, output_type=api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='CreateComponentDef', full_name='monorail.v3.Projects.CreateComponentDef', index=2, containing_service=None, input_type=_CREATECOMPONENTDEFREQUEST, output_type=api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='DeleteComponentDef', full_name='monorail.v3.Projects.DeleteComponentDef', index=3, containing_service=None, input_type=_DELETECOMPONENTDEFREQUEST, output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='ListIssueTemplates', 
full_name='monorail.v3.Projects.ListIssueTemplates', index=4, containing_service=None, input_type=_LISTISSUETEMPLATESREQUEST, output_type=_LISTISSUETEMPLATESRESPONSE, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='ListComponentDefs', full_name='monorail.v3.Projects.ListComponentDefs', index=5, containing_service=None, input_type=_LISTCOMPONENTDEFSREQUEST, output_type=_LISTCOMPONENTDEFSRESPONSE, serialized_options=None, create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name='ListProjects', full_name='monorail.v3.Projects.ListProjects', index=6, containing_service=None, input_type=_LISTPROJECTSREQUEST, output_type=_LISTPROJECTSRESPONSE, serialized_options=None, create_key=_descriptor._internal_create_key, ), ]) _sym_db.RegisterServiceDescriptor(_PROJECTS) DESCRIPTOR.services_by_name['Projects'] = _PROJECTS # @@protoc_insertion_point(module_scope)
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2980, 515, 416, 262, 8435, 11876, 17050, 13, 220, 8410, 5626, 48483, 0, 198, 2, 2723, 25, 40391, 14, 85, 18, 14, 15042, 62, 1676, 1462, 14, 42068, 13, 1676, 1462,...
2.464867
10,631
"""Testing Device operations.""" import json import unittest.mock as mock from click.testing import CliRunner import homeassistant_cli.cli as cli def test_device_list(default_devices) -> None: """Test Device List.""" with mock.patch( 'homeassistant_cli.remote.get_devices', return_value=default_devices ): runner = CliRunner() result = runner.invoke( cli.cli, ["--output=json", "device", "list"], catch_exceptions=False, ) assert result.exit_code == 0 data = json.loads(result.output) assert len(data) == 23 def test_device_list_filter(default_devices) -> None: """Test Device List.""" with mock.patch( 'homeassistant_cli.remote.get_devices', return_value=default_devices ): runner = CliRunner() result = runner.invoke( cli.cli, ["--output=json", "device", "list", "table"], catch_exceptions=False, ) assert result.exit_code == 0 data = json.loads(result.output) assert len(data) == 2 assert data[0]['name'] == "Kitchen table left" assert data[1]['name'] == "Kitchen table right" def test_device_assign(default_areas, default_devices) -> None: """Test basic device assign.""" with mock.patch( 'homeassistant_cli.remote.get_devices', return_value=default_devices ): with mock.patch( 'homeassistant_cli.remote.get_areas', return_value=default_areas ): with mock.patch( 'homeassistant_cli.remote.assign_area', return_value={'success': True}, ): runner = CliRunner() result = runner.invoke( cli.cli, ["device", "assign", "Kitchen", "Kitchen table left"], catch_exceptions=False, ) print(result.output) assert result.exit_code == 0 expected = ( "Successfully assigned 'Kitchen'" " to 'Kitchen table left'\n" ) assert result.output == expected
[ 37811, 44154, 16232, 4560, 526, 15931, 198, 198, 11748, 33918, 198, 11748, 555, 715, 395, 13, 76, 735, 355, 15290, 198, 198, 6738, 3904, 13, 33407, 1330, 1012, 72, 49493, 198, 11748, 1363, 562, 10167, 62, 44506, 13, 44506, 355, 537, 7...
2.099905
1,051
from PyQt5.QtWidgets import QAction, QTreeWidget, QTreeWidgetItem, QFileDialog from PyQt5.QtGui import QIcon from PyQt5.QtCore import Qt import animations.general_animation as j3d from widgets.yaz0 import compress, compress_slow, compress_fast from io import BytesIO
[ 6738, 9485, 48, 83, 20, 13, 48, 83, 54, 312, 11407, 1330, 1195, 12502, 11, 1195, 27660, 38300, 11, 1195, 27660, 38300, 7449, 11, 1195, 8979, 44204, 198, 6738, 9485, 48, 83, 20, 13, 48, 83, 8205, 72, 1330, 220, 1195, 19578, 198, 67...
2.966667
90
# Create the pre-defined song values and empty variables...Correct names not
# used so each starting letter would be unique.
numbers = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)
letters = ['a ', 'b ', 'c ', 'd ', 'e ', 'f ', 'g ', 'h ', 'i ', 'j ',
           'k ', 'l ', 'm ', 'n ', 'o ', 'p ', 'q ', 'r ']
roman = ['I ', 'II ', 'III ', 'IV ', 'V ', 'VI ', 'VII ', 'VIII ', 'IX ',
         'X ', 'XI ', 'XII ', 'XIII ', 'XIV ', 'XV ', 'XVI ', 'XVII ', 'XVIII']
military = ['alpha ', 'bravo ', 'charlie ', 'delta ', 'echo ', 'foxtrot ',
            'golf ', 'hotel ', 'india ', 'juliet ', 'kilo ', 'lima ',
            'mike ', 'november ', 'oscar ', 'papa ', 'quebec ', 'romeo ']
# BUG FIX: restored the umlaut spellings ('fnf', 'zwlf', ... had been mangled
# by a lossy encoding conversion).
german = ['eins', 'zwei', 'drei', 'vier', 'fünf', 'sechs', 'sieben', 'acht',
          'neun', 'zehn', 'elf', 'zwölf', 'dreizehn', 'vierzehn', 'fünfzehn',
          'sechzehn', 'siebzehn', 'achtzehn']
pi = ['3 ', 'point ', '1 ', '4 ', '1 ', '5 ', '9 ', '2 ', '6 ', '5 ',
      '3 ', '5 ', '8 ', '9 ', '7 ', '9 ', '3 ', '2 ']

# Build morse code sequences: each entry for 1-18 is a tuple of 'dot'/'dash'.
t = 'dot'
s = 'dash'
m1 = t, s, s, s, s
m2 = t, t, s, s, s
m3 = t, t, t, s, s
m4 = t, t, t, t, s
m5 = t, t, t, t, t
m6 = s, t, t, t, t
m7 = s, s, t, t, t
m8 = s, s, s, t, t
m9 = s, s, s, s, t
m0 = s, s, s, s, s
code = [m1, m2, m3, m4, m5, m6, m7, m8, m9, m1 + m0, m1 + m1, m1 + m2,
        m1 + m3, m1 + m4, m1 + m5, m1 + m6, m1 + m7, m1 + m8]
# Other ideas: piglatin, japanese, spanish, prime, tau, e, ...


def _render(item):
    """Turn one song element into printable text.

    Morse entries are tuples of 'dot'/'dash'; everything else is an int
    or a str (possibly with a trailing space).
    """
    if isinstance(item, tuple):
        return " ".join(item)
    return str(item).strip()


print("Hello, let's sing a song that everybody loves!\n")

sing = 'y'
while sing == 'y':
    user = []
    variation = input ("Please input what variation you wish to perform be entering 'numbers', 'letters', 'roman', 'military', 'pi', 'german', 'code', or 'user' to make your own song: \n").lower().strip()

    # Map the chosen menu word (or its first letter) to the sequence itself.
    if variation == "numbers" or variation == "n":
        variation = numbers
    elif variation == "letters" or variation == "l":
        variation = letters
    elif variation == "roman" or variation == "r":
        variation = roman
    elif variation == "military" or variation == "m":
        variation = military
    elif variation == "pi" or variation == "p":
        variation = pi
    elif variation == "german" or variation == "g":
        variation = german
    elif variation == "code" or variation == "c":
        variation = code
    elif variation == "user" or variation == "u":
        while len(user) < 18:
            user.append(input ("Enter a word: "))
        # BUG FIX: the collected words were never assigned, so the song was
        # built from the literal string "user" instead of the user's words.
        variation = user
    else:
        # Invalid-entry catch (was a TODO in the original): fall back to a
        # known sequence instead of slicing the raw input string.
        print("\nI don't know that variation; using 'numbers' instead.")
        variation = numbers

    # User input to select the song pattern.
    pattern = input ("\nNow please tell me what pattern to use by entering 'forward', 'backward', 'even', or 'odd':\n")

    print ("\nHere we go: \n\n")

    song1 = "Oh, there are "
    song2 = " wheels on a big rig truck!"

    # Pick the slice for the requested pattern; unrecognised patterns fall
    # back to 'forward' (another invalid-entry catch).
    if pattern == 'backward' or pattern == 'b':
        chosen = variation[::-1]
    elif pattern == 'odd' or pattern == 'o':
        chosen = variation[::2]
    elif pattern == 'even' or pattern == 'e':
        chosen = variation[1::2]
    else:
        chosen = variation[::]

    # Join the elements into one readable line (the original printed the raw
    # tuple, e.g. ("Oh, there are ", [...], " wheels...")).
    print(song1 + ", ".join(_render(item) for item in chosen) + song2)

    sing = input('\n\nWould you like to sing it again? (y/n) ').lower()
# This is the end of the while loop.
else:
    print ("\nOK, Goodbye!")
[ 2, 16447, 262, 662, 12, 23211, 3496, 3815, 290, 6565, 9633, 986, 42779, 3891, 407, 973, 523, 1123, 3599, 3850, 561, 307, 3748, 198, 198, 77, 17024, 796, 357, 16, 837, 17, 837, 18, 837, 19, 837, 20, 837, 21, 837, 22, 837, 23, 837...
2.347885
1,466
# (c) 2012-2014 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
import os
from os.path import dirname, join, exists
import unittest

import pytest

import conda.config as config
from conda.utils import get_yaml
from conda.compat import iterkeys

from tests.helpers import run_conda_command

yaml = get_yaml()

# use condarc from source tree to run these tests against
config.rc_path = join(dirname(__file__), 'condarc')


def _get_default_urls():
    # BUG FIX: this helper was referenced below but never defined, which made
    # the module raise NameError on import.  Pin the default channel URLs so
    # the tests do not depend on the local machine's configuration.
    return ['http://repo.continuum.io/pkgs/free',
            'http://repo.continuum.io/pkgs/pro']

config.get_default_urls = _get_default_urls

# unset CIO_TEST. This is a Continuum-internal variable that draws packages
# from an internal server instead of repo.continuum.io
try:
    del os.environ['CIO_TEST']
except KeyError:
    pass

test_condarc = os.path.join(os.path.dirname(__file__), 'test_condarc')


def _read_test_condarc():
    # BUG FIX: referenced by test_invalid_rc() but never defined in this
    # module; return the current contents of the scratch condarc file.
    with open(test_condarc) as f:
        return f.read()


# Tests for the conda config command
# FIXME This should be split into multiple individual tests
def test_invalid_rc():
    # Some tests for unexpected input in the condarc, like keys that are the
    # wrong type
    try:
        condarc = """\
channels:
"""

        with open(test_condarc, 'w') as f:
            f.write(condarc)

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--add', 'channels', 'test')
        assert stdout == ''
        assert stderr == """\
Error: Could not parse the yaml file. Use -f to use the yaml parser (this will remove any structure or comments from the existing .condarc file).
Reason: key 'channels' should be a list, not NoneType."""
        # The malformed file must be left untouched on failure.
        assert _read_test_condarc() == condarc

        os.unlink(test_condarc)
    finally:
        # Best-effort cleanup (a stray dead `pass` statement was removed here).
        try:
            os.unlink(test_condarc)
        except OSError:
            pass


def test_config_set():
    # Test the config set command
    # Make sure it accepts only boolean values for boolean keys and any value
    # for string keys
    try:
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'always_yes', 'yep')

        assert stdout == ''
        assert stderr == 'Error: Key: always_yes; yep is not a YAML boolean.'
    finally:
        try:
            os.unlink(test_condarc)
        except OSError:
            pass


def test_set_rc_string():
    # Test setting string keys in .condarc
    # We specifically test ssl_verify since it can be either a boolean or a
    # string
    try:
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'ssl_verify', 'yes')
        assert stdout == ''
        assert stderr == ''

        # Use context managers so the scratch file handle is always closed
        # (the original leaked the file object from `open(...)`).
        with open(test_condarc, 'r') as f:
            verify = yaml.load(f, Loader=yaml.RoundTripLoader)['ssl_verify']
        assert verify == 'yes'

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'ssl_verify', 'test_string.crt')
        assert stdout == ''
        assert stderr == ''

        with open(test_condarc, 'r') as f:
            verify = yaml.load(f, Loader=yaml.RoundTripLoader)['ssl_verify']
        assert verify == 'test_string.crt'

        os.unlink(test_condarc)
    finally:
        try:
            os.unlink(test_condarc)
        except OSError:
            pass
[ 2, 357, 66, 8, 2321, 12, 4967, 6389, 13814, 30437, 11, 3457, 13, 1220, 2638, 1378, 18487, 13814, 13, 952, 198, 2, 1439, 6923, 33876, 198, 2, 198, 2, 1779, 64, 318, 9387, 739, 262, 2846, 286, 262, 347, 10305, 513, 12, 565, 682, 5...
2.351279
1,486
# Bert has a Mouth, and It Must Speak: BERT as a Markov Random Field Language Model, # by Alex Wang, Kyunghyun Cho, NeuralGen 2019 # https://colab.research.google.com/drive/1MxKZGtQ9SSBjTK5ArsZ5LKhkztzg52RV # https://arxiv.org/abs/1902.04094 import tensorflow as tf import tensorflow_probability as tfp import numpy as np import math from malaya.text.bpe import merge_sentencepiece_tokens, merge_wordpiece_tokens CLS = '[CLS]' SEP = '[SEP]' MASK = '[MASK]'
[ 2, 22108, 468, 257, 44764, 11, 290, 632, 12039, 40802, 25, 347, 17395, 355, 257, 2940, 709, 14534, 7663, 15417, 9104, 11, 198, 2, 416, 4422, 15233, 11, 11118, 403, 456, 88, 403, 10031, 11, 47986, 13746, 13130, 198, 2, 3740, 1378, 40...
2.532609
184
""" [E] Given a sorted array, create a new array containing squares of all the number of the input array in the sorted order. Input: [-2, -1, 0, 2, 3] Output: [0, 1, 4, 4, 9] """ # Time: O(N) Space: O(n)
[ 628, 198, 37811, 198, 198, 58, 36, 60, 11259, 257, 23243, 7177, 11, 2251, 257, 649, 7177, 7268, 24438, 286, 477, 262, 220, 198, 17618, 286, 262, 5128, 7177, 287, 262, 23243, 1502, 13, 198, 198, 20560, 25, 25915, 17, 11, 532, 16, 1...
2.566265
83
import os
from azureml.pipeline.steps import PythonScriptStep
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
from azureml.pipeline.core import PipelineData
from azureml.pipeline.core import PipelineParameter
from azureml.pipeline.steps import EstimatorStep
from azureml.train.dnn import PyTorch

def evaluate_step(model_dir, test_dir, compute_target):
    '''
    This step evaluates the trained model on the testing data and outputs the accuracy.

    :param model_dir: The reference to the directory containing the trained model
    :type model_dir: DataReference
    :param test_dir: The reference to the directory containing the testing data
    :type test_dir: DataReference
    :param compute_target: The compute target to run the step on
    :type compute_target: ComputeTarget
    :return: The evaluate step, step outputs dictionary (keys: accuracy_file)
    :rtype: EstimatorStep, dict
    '''
    # Pipeline output that receives the accuracy written by evaluate.py.
    accuracy_file = PipelineData(
        name='accuracy_file',
        pipeline_output_name='accuracy_file',
        datastore=test_dir.datastore,
        output_mode='mount',
        is_directory=False)

    outputs = [accuracy_file]
    outputs_map = { 'accuracy_file': accuracy_file }

    # The estimator runs evaluate.py (located next to this file) on a GPU node.
    estimator = PyTorch(
        source_directory=os.path.dirname(os.path.abspath(__file__)),
        entry_script='evaluate.py',
        framework_version='1.3',
        compute_target=compute_target,
        use_gpu=True)

    step = EstimatorStep(
        name="Evaluate Model",
        estimator=estimator,
        estimator_entry_script_arguments=[
            '--test_dir', test_dir,
            '--model_dir', model_dir,
            '--accuracy_file', accuracy_file
        ],
        inputs=[model_dir, test_dir],
        outputs=outputs,
        compute_target=compute_target,
        allow_reuse=True)

    return step, outputs_map
[ 11748, 28686, 198, 6738, 35560, 495, 4029, 13, 79, 541, 4470, 13, 20214, 1330, 11361, 7391, 8600, 198, 6738, 35560, 495, 4029, 13, 7295, 13, 5143, 11250, 1330, 5660, 38149, 198, 6738, 35560, 495, 4029, 13, 7295, 13, 66, 13533, 62, 458...
2.60462
736
import torch
import argparse
import os
import sys
import cv2
import time


# NOTE(review): `Configuration` is neither imported nor defined in this chunk,
# so this line raises NameError as written — presumably the class comes from a
# project configuration module; confirm and add the missing import.
cfg = Configuration()  # global runtime configuration object for this script
[ 11748, 28034, 198, 11748, 1822, 29572, 198, 11748, 28686, 198, 11748, 25064, 198, 11748, 269, 85, 17, 198, 11748, 640, 628, 628, 198, 37581, 796, 28373, 3419, 198 ]
3.535714
28
#!/usr/bin/python2.4 import httplib, urllib, sys # Define the parameters for the POST request and encode them in # a URL-safe format. params = urllib.urlencode([ #('js_code', sys.argv[1]), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'), ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'), ('compilation_level', 'ADVANCED_OPTIMIZATIONS'), ('output_format', 'text'), ('output_info', 'warnings'), ]) # Always use the following value for the Content-type header. headers = { "Content-type": "application/x-www-form-urlencoded" } conn = httplib.HTTPConnection('closure-compiler.appspot.com') conn.request('POST', '/compile', params, headers) response = conn.getresponse() data = response.read() print data conn.close()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 17, 13, 19, 198, 198, 11748, 1841, 489, 571, 11, 2956, 297, 571, 11, 25064, 198, 198, 2, 2896, 500, 262, 10007, 329, 262, 24582, 2581, 290, 37773, 606, 287, 198, 2, 257, 10289, 12, 21230, 5794...
2.781609
348
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for tf_should_use.""" # pylint: disable=unused-import from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import gc import sys from tensorflow.python.framework import constant_op from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging from tensorflow.python.util import tf_should_use if __name__ == '__main__': test.main()
[ 2, 15069, 2177, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.875
296
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Rules for importing and registering a local JDK."""

load(":default_java_toolchain.bzl", "JVM8_TOOLCHAIN_CONFIGURATION", "default_java_toolchain")

def local_java_runtime(name, java_home, version, runtime_name = None, visibility = ["//visibility:public"]):
    """Defines a java_runtime target together with Java runtime and compile toolchain definitions.

    Java runtime toolchain is constrained by flag --java_runtime_version having
    value set to either name or version argument.

    Java compile toolchains are created for --java_language_version flags values
    between 8 and version (inclusive). Java compile toolchains use the same
    (local) JDK for compilation. This requires a different configuration for
    JDK8 than the newer versions.

    Args:
      name: name of the target.
      java_home: Path to the JDK.
      version: Version of the JDK.
      runtime_name: name of java_runtime target if it already exists.
      visibility: Visibility that will be applied to the java runtime target
    """
    if runtime_name == None:
        runtime_name = name

        # Only create the java_runtime when the caller did not supply an
        # already-existing one.
        native.java_runtime(
            name = runtime_name,
            java_home = java_home,
            visibility = visibility,
        )

    # Three config_settings: toolchain resolution may match on the repository
    # name, the bare version, or the combined "<name>_<version>" value.
    native.config_setting(
        name = name + "_name_setting",
        values = {"java_runtime_version": name},
        visibility = ["//visibility:private"],
    )
    native.config_setting(
        name = name + "_version_setting",
        values = {"java_runtime_version": version},
        visibility = ["//visibility:private"],
    )
    native.config_setting(
        name = name + "_name_version_setting",
        values = {"java_runtime_version": name + "_" + version},
        visibility = ["//visibility:private"],
    )
    native.alias(
        name = name + "_settings_alias",
        actual = select({
            name + "_name_setting": name + "_name_setting",
            name + "_version_setting": name + "_version_setting",
            "//conditions:default": name + "_name_version_setting",
        }),
        visibility = ["//visibility:private"],
    )
    native.toolchain(
        name = "runtime_toolchain_definition",
        target_settings = [":%s_settings_alias" % name],
        toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
        toolchain = runtime_name,
    )
    if version == "8":
        # JDK 8 needs a dedicated toolchain configuration.
        default_java_toolchain(
            name = name + "_toolchain_java8",
            configuration = JVM8_TOOLCHAIN_CONFIGURATION,
            source_version = version,
            target_version = version,
            java_runtime = runtime_name,
        )
    elif type(version) == type("") and version.isdigit() and int(version) > 8:
        # Use a distinct loop variable instead of shadowing `version`
        # (behavior is unchanged: range() was evaluated before the loop ran).
        for lang_version in range(8, int(version) + 1):
            default_java_toolchain(
                name = name + "_toolchain_java" + str(lang_version),
                source_version = str(lang_version),
                target_version = str(lang_version),
                java_runtime = runtime_name,
            )

    # else version is not recognized and no compilation toolchains are predefined

def _local_java_repository_impl(repository_ctx):
    """Repository rule local_java_repository implementation.

    Args:
      repository_ctx: repository context
    """
    java_home = repository_ctx.attr.java_home
    java_home_path = repository_ctx.path(java_home)
    if not java_home_path.exists:
        # BUG FIX: `%` binds tighter than `+`, so the original expression
        # applied the format arguments to the placeholder-free second literal
        # and failed with "not all arguments converted" instead of printing
        # the intended message.  Parenthesize the concatenation first.
        fail(('The path indicated by the "java_home" attribute "%s" (absolute: "%s") ' +
              "does not exist.") % (java_home, str(java_home_path)))

    repository_ctx.file(
        "WORKSPACE",
        "# DO NOT EDIT: automatically generated WORKSPACE file for local_java_repository\n" +
        "workspace(name = \"{name}\")\n".format(name = repository_ctx.name),
    )

    extension = ".exe" if repository_ctx.os.name.lower().find("windows") != -1 else ""
    java_bin = java_home_path.get_child("bin").get_child("java" + extension)

    if not java_bin.exists:
        # Java binary does not exist: emit "virtual" targets that only fail
        # when they are actually requested.
        repository_ctx.file(
            "BUILD.bazel",
            _NOJDK_BUILD_TPL.format(
                local_jdk = repository_ctx.name,
                java_binary = "bin/java" + extension,
                java_home = java_home,
            ),
            False,
        )
        return

    # Detect version; _detect_java_version is defined elsewhere in this file
    # (not visible in this chunk).
    version = repository_ctx.attr.version if repository_ctx.attr.version != "" else _detect_java_version(repository_ctx, java_bin)

    # Prepare BUILD file using "local_java_runtime" macro
    build_file = ""
    if repository_ctx.attr.build_file != None:
        build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file))

    # When a custom build file supplies the runtime, reference its "jdk" target.
    runtime_name = '"jdk"' if repository_ctx.attr.build_file else None
    local_java_runtime_macro = """
local_java_runtime(
    name = "%s",
    runtime_name = %s,
    java_home = "%s",
    version = "%s",
)
""" % (repository_ctx.name, runtime_name, java_home, version)

    repository_ctx.file(
        "BUILD.bazel",
        'load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_runtime")\n' +
        build_file +
        local_java_runtime_macro,
    )

    # Symlink all files
    for file in repository_ctx.path(java_home).readdir():
        repository_ctx.symlink(file, file.basename)

# Build file template, when JDK does not exist
_NOJDK_BUILD_TPL = '''load("@bazel_tools//tools/jdk:fail_rule.bzl", "fail_rule")
fail_rule(
    name = "jdk",
    header = "Auto-Configuration Error:",
    message = ("Cannot find Java binary {java_binary} in {java_home}; either correct your JAVA_HOME, " +
               "PATH or specify Java from remote repository (e.g. " +
               "--java_runtime_version=remotejdk_11")
)
config_setting(
    name = "localjdk_setting",
    values = {{"java_runtime_version": "{local_jdk}"}},
    visibility = ["//visibility:private"],
)
toolchain(
    name = "runtime_toolchain_definition",
    target_settings = [":localjdk_setting"],
    toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
    toolchain = ":jdk",
)
'''

_local_java_repository_rule = repository_rule(
    implementation = _local_java_repository_impl,
    local = True,
    configure = True,
    attrs = {
        "java_home": attr.string(),
        "version": attr.string(),
        "build_file": attr.label(),
    },
)

def local_java_repository(name, java_home, version = "", build_file = None):
    """Registers a runtime toolchain for local JDK and creates an unregistered compile toolchain.

    Toolchain resolution is constrained with --java_runtime_version flag having
    value of the "name" or "version" parameter.

    Java compile toolchains are created for --java_language_version flags values
    between 8 and version (inclusive). Java compile toolchains use the same
    (local) JDK for compilation.

    If there is no JDK "virtual" targets are created, which fail only when
    actually needed.

    Args:
      name: A unique name for this rule.
      java_home: Location of the JDK imported.
      build_file: optionally BUILD file template
      version: optionally java version
    """
    _local_java_repository_rule(name = name, java_home = java_home, version = version, build_file = build_file)
    native.register_toolchains("@" + name + "//:runtime_toolchain_definition")
[ 2, 15069, 12131, 383, 347, 41319, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, ...
2.636394
2,995
from couchdbkit import ResourceNotFound from tastypie import fields as tp_f from corehq.apps.api.resources import JsonResource from corehq.apps.api.resources.v0_1 import ( CustomResourceMeta, RequirePermissionAuthentication, ) from corehq.apps.api.util import get_object_or_not_exist from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType from corehq.apps.users.models import Permissions
[ 6738, 18507, 9945, 15813, 1330, 20857, 3673, 21077, 198, 6738, 14854, 4464, 494, 1330, 7032, 355, 256, 79, 62, 69, 198, 6738, 4755, 71, 80, 13, 18211, 13, 15042, 13, 37540, 1330, 449, 1559, 26198, 198, 6738, 4755, 71, 80, 13, 18211, ...
3.12782
133
# -*- coding: utf-8 -*- """Define the cert_manager.domain.Domain unit tests.""" # Don't warn about things that happen as that is part of unit testing # pylint: disable=protected-access # pylint: disable=no-member import json from requests.exceptions import HTTPError from testtools import TestCase import responses from cert_manager.domain import Domain, DomainCreationResponseError from .lib.testbase import ClientFixture
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 7469, 500, 262, 5051, 62, 37153, 13, 27830, 13, 43961, 4326, 5254, 526, 15931, 198, 2, 2094, 470, 9828, 546, 1243, 326, 1645, 355, 326, 318, 636, 286, 4326, 485...
3.453125
128
"""Text parts.""" SEPARATOR = '----------------------------------' CONT_GAME = 'enter ' GREETING = ' '' ''!\n' \ ' , ' \ ' !' NAME_QUESTION = ' ?' CHOOSE_LEVEL = ' , ' \ ' . \n' \ '1 - \n' \ '2 - \n' \ '3 - ' INTRODUCTION = ' , \n' \ ' , \n' \ ' 10 . \n' \ ' . , , \n' \ ' . \n' \ ' , \n' \ ' . \n\n' \ ' - . \n\n' \ ' , \n' \ ' \n' \ ', . \n\n' \ '!!! ,\n' \ ' . !!!' ORACLE_QUESTION = ' . \n' \ ' \n' \ ' . ?\n' \ '----------------------------------\n'\ '1 - , \n' \ '2 - , ' ORACLE_QUESTION_1 = ' ? \n' \ '----------------------------------\n'\ '1 - , ! \n'\ '2 - ? (1 ) \n'\ '3 - ? (1 ) \n'\ '4 - ? (1 ) \n'\ '5 - (3 )' ORACLE_QUESTION_2 = ' ? \n' \ '----------------------------------\n'\ '1 - , ! \n'\ '2 - ? (1 ) \n'\ '3 - ? (1 ) \n'\ '4 - ? (1 )' GO_TAVERN_TEXT = '! \n' \ ', .' EXIT_QUESTION = ' ?\n' \ '----------------------------------\n'\ '1 - \n' \ '2 - ' SUCCESS_STEP = '! ! \n' \ ' .' FAILURE_STEP = ' , . \n' \ ' , \n' \ ' . !' WINNING = '! \n' \ ', ) \n' \ ' .' LOSING = ', . \n' \ ' . ! \n' \ ' .' NAMES = ['', '', '', '', ' ', '', '', '', '', '', '', '', '', '', '', '', '', '']
[ 37811, 8206, 3354, 526, 15931, 198, 5188, 27082, 25633, 796, 705, 3880, 438, 6, 198, 198, 37815, 62, 47109, 796, 705, 9255, 220, 220, 705, 198, 198, 28934, 2767, 2751, 796, 705, 220, 220, 220, 10148, 10148, 0, 59, 77, 6, 3467, 198, ...
1.38255
1,490
# Generated by Django 3.1.3 on 2021-01-07 00:42 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django.utils.timezone
[ 2, 2980, 515, 416, 37770, 513, 13, 16, 13, 18, 319, 33448, 12, 486, 12, 2998, 3571, 25, 3682, 198, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 142...
3.04918
61
import os import importlib
[ 11748, 28686, 198, 11748, 1330, 8019, 628 ]
4
7
from flask import Blueprint, Flask, send_from_directory from werkzeug.security import check_password_hash, generate_password_hash from app import db from app.mod_auth.forms import LoginForm from app.mod_auth.models import User mod_ecomm = Blueprint('products', __name__, url_prefix='/products', static_folder='../../frontend/build')
[ 6738, 42903, 1330, 39932, 11, 46947, 11, 3758, 62, 6738, 62, 34945, 201, 198, 6738, 266, 9587, 2736, 1018, 13, 12961, 1330, 2198, 62, 28712, 62, 17831, 11, 7716, 62, 28712, 62, 17831, 201, 198, 201, 198, 6738, 598, 1330, 20613, 201, ...
2.875
128
from .functions import monitor_watchlist_action, manager with manager.get_dagr(): monitor_watchlist_action()
[ 6738, 764, 12543, 2733, 1330, 5671, 62, 8340, 4868, 62, 2673, 11, 4706, 201, 198, 201, 198, 4480, 4706, 13, 1136, 62, 67, 363, 81, 33529, 201, 198, 220, 220, 220, 5671, 62, 8340, 4868, 62, 2673, 3419, 201, 198 ]
2.95
40
import pprint import logging from django.conf import settings from rest_framework import status from rest_framework.views import APIView from rest_framework.response import Response from zenslackchat.message import handler from zenslackchat.models import SlackApp from zenslackchat.models import ZendeskApp
[ 11748, 279, 4798, 198, 11748, 18931, 198, 198, 6738, 42625, 14208, 13, 10414, 1330, 6460, 198, 6738, 1334, 62, 30604, 1330, 3722, 198, 6738, 1334, 62, 30604, 13, 33571, 1330, 3486, 3824, 769, 198, 6738, 1334, 62, 30604, 13, 26209, 1330,...
3.780488
82
# Copyright 2020 University Of Delhi. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Compute Related Checks """ import configparser import json import re import logging from tools.kube_utils import kube_exec, get_pod_with_labels from tools.conf import settings from internal import store_result ########### # Checks ########### def isolated_cores_check(): """ isolated_cores_check """ logger = logging.getLogger(__name__) traced_value = trace_isolated_cores() required_value = required_isolated_cores() result = {'category': 'compute', 'case_name': 'isolated_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def reserved_vnf_cores_check(): """ reserved_vnf_cores_check """ logger = logging.getLogger(__name__) traced_value = trace_reserved_vnf_cores() required_value = required_reserved_vnf_cores() result = {'category': 'compute', 'case_name': 'reserved_vnf_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def vswitch_pmd_cores_check(): """ vswitch_pmd_cores_check """ logger = logging.getLogger(__name__) traced_value = trace_vswitch_pmd_cores() required_value = required_vswitch_pmd_cores() 
result = {'category': 'compute', 'case_name': 'vswitch_pmd_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def vswitch_dpdk_lcores_check(): """ vswitch_dpdk_lcores_check """ logger = logging.getLogger(__name__) traced_value = trace_vswitch_dpdk_lcores() required_value = required_vswitch_dpdk_lcores() result = {'category': 'compute', 'case_name': 'vswitch_dpdk_lcores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def os_reserved_cores_check(): """ os_reserved_cores_check """ logger = logging.getLogger(__name__) traced_value = trace_os_reserved_cores() required_value = required_os_reserved_cores() result = {'category': 'compute', 'case_name': 'os_reserved_cores_check', 'details': {'traced_cores': traced_value, 'required_cores': required_value } } if is_ranges_equals(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def nova_scheduler_filters_check(): """ nova_scheduler_filters_check """ logger = logging.getLogger(__name__) traced_value = trace_nova_scheduler_filters() required_value = required_nova_scheduler_filters() result = {'category': 'compute', 'case_name': 'nova_scheduler_filters_check', 'details': {'traced_filters': traced_value, 'required_filters': required_value } } if are_lists_equal(traced_value, required_value): result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result def cpu_allocation_ratio_check(): """ cpu_allocation_ratio_check """ logger = logging.getLogger(__name__) traced_value = trace_cpu_allocation_ratio() required_value = 
required_cpu_allocation_ratio() result = {'category': 'compute', 'case_name': 'cpu_allocation_ratio_check', 'details': {'traced_ratio': traced_value, 'required_ratio': required_value } } if traced_value == required_value: result['criteria'] = 'pass' else: result['criteria'] = 'fail' store_result(logger, result) return result ############### # helper functions ############### def trace_isolated_cores(): """ Trace isolated_cores from Airship deployment :return: value traced from `isolcpus` key in `/proc/cmdline` """ pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/proc/cmdline'] proc_cmd = kube_exec(pod, cmd) for option in proc_cmd.split(): if 'isolcpus' in option: _, isolcpus_value = split_key_value(option) break return isolcpus_value def required_isolated_cores(): """ Returns value of `isolated_cpus` from platform_profile used by Role for worker nodes in PDF :return: isolated_cores value expected by the PDF """ worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['isolated_cpus'] def trace_reserved_vnf_cores(): """ Trace vnf_reserved_cores from Airship deployment :return: value traced from `vcpu_pin_set` key in nova.conf of actual deployment """ try: config = get_nova_conf() vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): vcpu_pin_set = '' return vcpu_pin_set def required_reserved_vnf_cores(): """ Returns value of vnf_cores from platform_profile used by Role for worker nodes in PDF :return: vnf_reserverd_core value expected by the PDF """ worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vnf_cores'] def trace_vswitch_pmd_cores(): """ Trace vswitch_pmd_cores from Airship deployment :return: value traced from `other_config:pmd-cpu-mask` in openvswitchdb using ovs-vsctl """ ovs_pod = 
get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'] response = kube_exec(ovs_pod, cmd) # convert config str to json str match = re.findall("[a-zA-Z0-9-]+=", response) for key in match: response = response.replace(key, '"' + key[:-1] + '":') match = re.findall(":[a-zA-Z0-9-]+", response) for key in match: response = response.replace(key[1:], '"' + key[1:] + '"') config = json.loads(response) if 'pmd-cpu-mask' in config: pmd_cores = hex_to_comma_list(config['pmd-cpu-mask']) else: pmd_cores = '' return pmd_cores def required_vswitch_pmd_cores(): """ Returns value of vswitch_pmd_cores from platform_profile used by Role for worker nodes in PDF :return: vswitch_pmd_cores value expected by the PDF """ worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_pmd_cores'] def trace_vswitch_dpdk_lcores(): """ Trace vswitch_dpdk_lcores from Airship deployment :return: value traced from `other_config:dpdk-lcore-mask` in openvswitchdb using ovs-vsctl """ ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd') cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config'] response = kube_exec(ovs_pod, cmd) # convert config str to json str match = re.findall("[a-zA-Z0-9-]+=", response) for key in match: response = response.replace(key, '"' + key[:-1] + '":') match = re.findall(":[a-zA-Z0-9-]+", response) for key in match: response = response.replace(key[1:], '"' + key[1:] + '"') config = json.loads(response) if 'dpdk-lcore-mask' in config: pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask']) else: pmd_cores = '' return pmd_cores def required_vswitch_dpdk_lcores(): """ Returns value of vswitch_dpdk_lcores from platform_profile used by Role for worker nodes in PDF :return: vswitch_dpdk_lcores value expected by the PDF """ worker_role = 
settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['vswitch_dpdk_lcores'] def trace_os_reserved_cores(): """ Trace os_reserved_cores from Airship deployment os_reserved_cores = all_cores - (reserved_vnf_cores + vswitch_pmd_cores + vswitch_dpdk_lcores) """ worker_role = settings.getValue('WORKER_ROLE_NAME') all_cores = get_cores_by_role(worker_role) reserved_vnf_cores = trace_reserved_vnf_cores() vswitch_pmd_cores = trace_vswitch_pmd_cores() vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores() non_os_cores = [] non_os_cores.extend(convert_range_to_list(reserved_vnf_cores)) non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores)) non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores)) os_reserved_cores = set(all_cores).difference(set(non_os_cores)) # return as string with comma separated value return ','.join(map(str, list(os_reserved_cores))) def required_os_reserved_cores(): """ Returns value of os_reserved_cores from platform_profile used by Role for worker nodes in PDF :return: os_reserved_cores value expected by the PDF """ worker_role = settings.getValue('WORKER_ROLE_NAME') profile = get_platform_profile_by_role(worker_role) return profile['os_reserved_cores'] def trace_nova_scheduler_filters(): """ Trace scheduler_filters from Airship deployment :return: value traced from `enabled_filters` key in nova.conf of actual deployment """ try: config = get_nova_conf() filters = config.get('filter_scheduler', 'enabled_filters') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): filters = '' filters = filters.split(',') map(str.strip, filters) return filters def required_nova_scheduler_filters(): """ Required nova scheduler_filters by the PDF """ pdf = settings.getValue('pdf_file') filters = pdf['vim_functional']['scheduler_filters'] filters = filters.split(',') map(str.strip, filters) return filters def trace_cpu_allocation_ratio(): """ Trace cpu_allocation_ratio from Airship 
deployment :return: value traced from `cpu_allocation_ratio` key in nova.conf of actual deployment """ try: config = get_nova_conf() cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio') except (configparser.NoOptionError, configparser.MissingSectionHeaderError): cpu_allocation_ratio = '' return float(cpu_allocation_ratio) def required_cpu_allocation_ratio(): """ Required cpu_allocation_ratio by the PDF """ pdf = settings.getValue('pdf_file') cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio'] return float(cpu_allocation_ratio) def get_role(role_name): """ Searches and returns role with `role_name` """ roles = settings.getValue('pdf_file')['roles'] for role in roles: if role['name'] == role_name: role_details = role return role_details def get_platform_profile(profile_name): """ Searches and returns platform_profile with `profile_name` """ platform_profiles = settings.getValue('pdf_file')['platform_profiles'] for profile in platform_profiles: if profile['profile_name'] == profile_name: profile_details = profile return profile_details def get_processor_profile(profile_name): """ Searches and returns processor_profile with `profile_name` """ processor_profiles = settings.getValue('pdf_file')['processor_profiles'] for profile in processor_profiles: if profile['profile_name'] == profile_name: profile_details = profile return profile_details def get_platform_profile_by_role(role_name): """ Returns platform profile details of a role """ role = get_role(role_name) profile = get_platform_profile(role['platform_profile']) return profile def get_hardware_profile_by_role(role_name): """ Returns hardware profile details of a role """ role = get_role(role_name) hardware_profiles = settings.getValue('pdf_file')['hardware_profiles'] for profile in hardware_profiles: if profile['profile_name'] == role['hardware_profile']: profile_details = profile return profile_details def get_cores_by_role(role_name): """ Returns cpu cores list of server hardware 
used in the role """ hardware_profile = get_hardware_profile_by_role(role_name) processor_profile = hardware_profile['profile_info']['processor_profile'] profile = get_processor_profile(processor_profile) cpus = [] for numa in profile['profile_info']['numas']: cpus.extend(convert_range_to_list(numa['cpu_set'])) return cpus def get_nova_conf(): """ Returns parsed nova.conf """ pod = get_pod_with_labels('application=nova,component=compute') cmd = ['cat', '/etc/nova/nova.conf'] response = kube_exec(pod, cmd) config = configparser.ConfigParser() config.read_string(response) return config ### cpu cores related helper function def convert_range_to_list(x): """ Returns list of numbers from given range as string e.g.: convert_range_to_list('3-5') will give [3, 4, 5] """ # pylint: disable=C0103 result = [] for part in x.split(','): if '-' in part: a, b = part.split('-') a, b = int(a), int(b) result.extend(range(a, b + 1)) elif part != '': a = int(part) result.append(a) # remove duplicates result = list(dict.fromkeys(result)) return result def is_ranges_equals(range1, range2): """ Checks whether two ranges passed as string are equal e.g.: is_ranges_equals('2-5', '2-4,5') returns true """ set1 = set(convert_range_to_list(range1)) set2 = set(convert_range_to_list(range2)) return set1 == set2 def are_lists_equal(list1, list2): """ Checks whether two list are identicals """ set1 = set(list1) set2 = set(list2) return set1 == set2 def hex_to_comma_list(hex_mask): """ Converts CPU mask given in hex to list of cores """ binary = bin(int(hex_mask, 16))[2:] reversed_binary = binary[::-1] i = 0 output = "" for bit in reversed_binary: if bit == '1': output = output + str(i) + ',' i = i + 1 return output[:-1] def comma_list_to_hex(cpus): """ Converts a list of cpu cores in corresponding hex value of cpu-mask """ cpu_arr = cpus.split(",") binary_mask = 0 for cpu in cpu_arr: binary_mask = binary_mask | (1 << int(cpu)) return format(binary_mask, '02x') def split_key_value(key_value_str, 
delimiter='='): """ splits given string into key and value based on delimiter :param key_value_str: example string `someKey=somevalue` :param delimiter: default delimiter is `=` :return: [ key, value] """ key, value = key_value_str.split(delimiter) key = key.strip() value = value.strip() return key, value
[ 2, 15069, 12131, 2059, 3226, 12517, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921...
2.395289
7,048
""" Here you find either new implemented modules or alternate implementations of already modules. This directory is intended to have a second implementation beside the main implementation to have a discussion which implementation to favor on the long run. """
[ 37811, 198, 4342, 345, 1064, 2035, 649, 9177, 13103, 393, 13527, 25504, 220, 198, 1659, 1541, 13103, 13, 770, 8619, 318, 5292, 284, 423, 257, 1218, 7822, 198, 12636, 485, 262, 1388, 7822, 284, 423, 257, 5114, 543, 7822, 284, 220, 198,...
5.038462
52
''' Created on Mar 6, 2014 @author: tharanga ''' import unittest from time import sleep import EventService as es from EventService import WebSocketServer as ws from EventService import EventManager as em import socket from base64 import b64encode import struct import MySQLdb import json import EventService import flaskr import tempfile ##TO RUN THE FOLLOWING UNIT TESTS IT IS EXPECTED HAVE THE DATABASE ##CREATED. DATABASE SCRIPT IS PROVIDED TO CREATE THE NECESSARY DATABASES AND TABLES ##ASSISCIATED DATA IS NOT PROVIDED. HOST = '127.0.0.1' # The remote host PORT = 17322 suite = suite() runner = unittest.TextTestRunner(verbosity=3) runner.run(suite) # if __name__ == "__main__": # #import sys;sys.argv = ['', 'Test.testName'] # unittest.main()
[ 7061, 6, 198, 41972, 319, 1526, 718, 11, 1946, 198, 198, 31, 9800, 25, 294, 283, 16484, 198, 7061, 6, 198, 11748, 555, 715, 395, 198, 6738, 640, 1330, 3993, 198, 11748, 8558, 16177, 355, 1658, 198, 6738, 8558, 16177, 1330, 5313, 391...
2.619355
310
import os import threading import time import unittest from OpenDrive.client_side import file_changes_json as c_json from OpenDrive.client_side import interface from OpenDrive.client_side import main from OpenDrive.client_side import paths as client_paths from OpenDrive.server_side import paths as server_paths from tests.client_side.helper_client import h_register_dummy_user_device_client from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \ h_clear_init_all_folders, h_create_empty
[ 11748, 28686, 198, 11748, 4704, 278, 198, 11748, 640, 198, 11748, 555, 715, 395, 198, 198, 6738, 4946, 24825, 13, 16366, 62, 1589, 1330, 2393, 62, 36653, 62, 17752, 355, 269, 62, 17752, 198, 6738, 4946, 24825, 13, 16366, 62, 1589, 133...
3.302469
162
import os import numpy as np from skimage import io, data_dir from skimage._shared import testing from skimage._shared.testing import assert_array_equal one_by_one_jpeg = ( b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01' b'\x00\x01\x00\x00\xff\xdb\x00C\x00\x03\x02\x02\x02\x02' b'\x02\x03\x02\x02\x02\x03\x03\x03\x03\x04\x06\x04\x04' b'\x04\x04\x04\x08\x06\x06\x05\x06\t\x08\n\n\t\x08\t\t' b'\n\x0c\x0f\x0c\n\x0b\x0e\x0b\t\t\r\x11\r\x0e\x0f\x10' b'\x10\x11\x10\n\x0c\x12\x13\x12\x10\x13\x0f\x10\x10' b'\x10\xff\xc0\x00\x0b\x08\x00\x01\x00\x01\x01\x01\x11' b'\x00\xff\xc4\x00\x14\x00\x01\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\xff\xc4\x00' b'\x14\x10\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' b'\x00\x00\x00\x00\x00\x00\xff\xda\x00\x08\x01\x01\x00' b'\x00?\x00*\x9f\xff\xd9' )
[ 11748, 28686, 198, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 9060, 1330, 33245, 11, 1366, 62, 15908, 198, 198, 6738, 1341, 9060, 13557, 28710, 1330, 4856, 198, 6738, 1341, 9060, 13557, 28710, 13, 33407, 1330, 6818, 62, 18747, ...
1.455611
597